diff --git a/spaces/123Kumar/vits-uma-genshin-honkai123/transforms.py b/spaces/123Kumar/vits-uma-genshin-honkai123/transforms.py deleted file mode 100644 index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000 --- a/spaces/123Kumar/vits-uma-genshin-honkai123/transforms.py +++ /dev/null @@ -1,193 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = { - 'tails': tails, - 'tail_bound': tail_bound - } - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum( - inputs[..., None] >= bin_locations, - dim=-1 - ) - 1 - - -def unconstrained_rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails='linear', - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == 'linear': - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError('{} tails are not implemented.'.format(tails)) - - outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative - ) - - return outputs, logabsdet - -def rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0., right=1., bottom=0., top=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError('Input to a transform is not within its domain') - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 
1.0: - raise ValueError('Minimal bin width too large for the number of bins') - if min_bin_height * num_bins > 1.0: - raise ValueError('Minimal bin height too large for the number of bins') - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (((inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta) - + input_heights * (input_delta - input_derivatives))) - b = (input_heights * input_derivatives - - (inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta)) - c = - input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * (input_delta * theta.pow(2) - + input_derivatives * theta_one_minus_theta) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Fix Generator V.2.0 Samsungl.md b/spaces/1gistliPinn/ChatGPT4/Examples/Fix Generator V.2.0 Samsungl.md deleted file mode 100644 index 
df2e3883fbe3cc77519584a058d594136c5e7d55..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Fix Generator V.2.0 Samsungl.md +++ /dev/null @@ -1,9 +0,0 @@ -
-

Finally, since some people don’t like documentation and don’t want to read, we allow you to instantly see the output of our generators by downloading this draft and running the python script in the top-left corner of your browser. An article about our work can be found here:

-

Fix Generator V.2.0 Samsungl


DOWNLOAD 🆓 https://imgfil.com/2uxZJw



-

Solar generators are the most expensive option, costing seven times as much as a standard fuel-powered generator. Price isn't the only issue. With fuel-powered generators, the output is consistent and guaranteed. Solar generators, however, require sunlight, which can be affected by things like cloud cover, placement location, and the length of the day, so they are nowhere near as reliable as their fossil-fuel counterparts. Solar generators do store power in a power bank, which manufacturers hope will get you through any cloudy patches. But the power bank won't charge when you are operating at capacity.

-

A conventional generator's main benefit over the other types listed in this article is power output. While there is a whole range of conventional generators, they usually have an output of at least 4,000 watts and up to around 12,000 watts. While that's overkill if you want to hook up a sound system for a family BBQ, it's ideal if you're going to power multiple large appliances during a power outage. They are also cheaper than inverter or solar generators.
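As a purely illustrative back-of-the-envelope check (the appliance wattages below are made-up ballpark figures, not measurements), you can add up the running watts of what you want to keep on and compare the total against the generator's rated output:

```python
# Hypothetical running-watt estimates for a few large appliances.
appliances = {
    "refrigerator": 700,
    "sump pump": 1000,
    "window AC unit": 1200,
    "lights and electronics": 600,
}

total = sum(appliances.values())
generator_output = 4000  # low end of the conventional-generator range quoted above

print(f"Estimated load: {total} W of {generator_output} W available")
print("Fits" if total <= generator_output else "Too much for this generator")
```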

-

The traditional list of uses for generators is long. Powering something that needs electricity when the sun doesn't shine or when the power grid is down is the most common. A generator provides ongoing and predictable power during a power outage. It can run a home lighting system at night, provide power for lights when batteries run out or for power tools when AC power isn't available, supply water pumps and pump stations during a power failure, charge a cell phone or other electronic devices when the grid is down, and power a lantern during a storm.

-

899543212b
-
-
\ No newline at end of file diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/README.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/README.md deleted file mode 100644 index b5434136c4802fec5a20b22359473b60ddc6c434..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: ChatGPT Prompt Generator -emoji: 👨🏻‍🎤 -colorFrom: purple -colorTo: pink -sdk: gradio -sdk_version: 3.16.2 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: umair007/ChatGPT-prompt-generator ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Dummynation Mod APK with Unlimited Troops and No Ads.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Dummynation Mod APK with Unlimited Troops and No Ads.md deleted file mode 100644 index e136697066c58368e78566e1ba7c1588fc5a3c2c..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Dummynation Mod APK with Unlimited Troops and No Ads.md +++ /dev/null @@ -1,106 +0,0 @@ - -

Dummynation Mod APK Unlimited Troops: How to Conquer the World with Ease

-

Do you love strategy games where you can control a country and lead it to world domination? If so, you might want to check out Dummynation, a game that gives you unlimited power over a country with a single promise to fulfill: world domination. But how do you manage to achieve it? That's up to you. You can expand your territory by military occupation, analyze and manipulate diplomatic relations, use your country's resources to sustain your research and military campaigns, and determine your country's economic policy. Sounds exciting, right? But what if we tell you that there is a way to make it even more exciting? That's right, we are talking about Dummynation Mod APK Unlimited Troops, a modded version of the game that removes ads and adds new features, such as unlimited troops. In this article, we will tell you everything you need to know about this mod, how to download and install it, how to play it, and some tips and tricks to help you conquer the world with ease.

-

dummynation mod apk unlimited troops


Download Zip ☆☆☆ https://urlin.us/2uT1WO



-

What is Dummynation?

-

Dummynation is a strategy game where you have unlimited power over a country, with a single promise to fulfill: world domination. How you manage to achieve it is up to you.

-

A strategy game where you have unlimited power over a country

-

In Dummynation, you can choose any country in the world to start with, and customize your leader's name and appearance. You can then use the map to select a target country and send your troops to occupy it. You can also monitor your power, relations, resources and economy on the dashboard, and use research and policy options to improve your country's performance and influence.

-

The goal is to achieve world domination by expanding your territory, manipulating diplomacy, managing resources and economy

-

The ultimate goal of Dummynation is to achieve world domination by any means necessary. You can expand your territory by invading other countries with your troops, or by forming alliances and treaties with them. You can also manipulate diplomatic relations by using propaganda, espionage, sabotage, or bribery. You can manage your resources by allocating them to different sectors, such as military, research, or economy. You can also determine your economic policy by setting taxes, tariffs, subsidies, or trade agreements. The game offers a lot of freedom and flexibility in how you want to play and achieve your goal.

-

What is Dummynation Mod APK Unlimited Troops?

-

Dummynation Mod APK Unlimited Troops is a modded version of the game that removes ads and adds new features, such as unlimited troops. The main feature is unlimited troops, which allows you to invade any country without worrying about casualties or costs.

-

dummynation mod apk no ads
-dummynation mod apk latest version
-dummynation mod apk free download
-dummynation mod apk unlimited gems
-dummynation mod apk android
-dummynation mod apk happymod
-dummynation mod apk world domination
-dummynation mod apk strategy game
-dummynation mod apk unlimited power
-dummynation mod apk military occupation
-dummynation mod apk diplomatic relations
-dummynation mod apk resource management
-dummynation mod apk economic policy
-dummynation mod apk new weapons
-dummynation mod apk updated graphics
-dummynation mod apk new levels
-dummynation mod apk easy install
-dummynation mod apk compatible devices
-dummynation mod apk anti-ban mechanism
-dummynation mod apk unlock characters
-dummynation mod apk offline mode
-dummynation mod apk multiplayer mode
-dummynation mod apk custom country
-dummynation mod apk realistic simulation
-dummynation mod apk historical scenarios
-dummynation mod apk random events
-dummynation mod apk achievements and leaderboards
-dummynation mod apk tips and tricks
-dummynation mod apk cheats and hacks
-dummynation mod apk reviews and ratings
-how to download dummynation mod apk unlimited troops
-how to play dummynation mod apk unlimited troops
-how to update dummynation mod apk unlimited troops
-how to uninstall dummynation mod apk unlimited troops
-how to backup and restore dummynation mod apk unlimited troops
-how to fix errors in dummynation mod apk unlimited troops
-how to contact developer of dummynation mod apk unlimited troops
-how to support developer of dummynation mod apk unlimited troops
-best alternatives to dummynation mod apk unlimited troops
-best strategies for dummynation mod apk unlimited troops
-best countries to play in dummynation mod apk unlimited troops
-best weapons to use in dummynation mod apk unlimited troops
-best allies and enemies in dummynation mod apk unlimited troops
-best resources to invest in dummynation mod apk unlimited troops
-best economic policies to adopt in dummynation mod apk unlimited troops
-best ways to achieve world domination in dummynation mod apk unlimited troops
-best ways to avoid war in dummynation mod apk unlimited troops
-best ways to win war in dummynation mod apk unlimited troops
-best ways to have fun in dummynation mod apk unlimited troops

-

A modded version of the game that removes ads and adds new features

-

Dummynation Mod APK Unlimited Troops is a modified version of the original game that removes annoying ads and adds new features that enhance the gameplay. The modded version is not available on the official app store, but you can download it from a reliable source online. The modded version does not require root access or any special permissions to install and run.

-

The main feature is unlimited troops, which allows you to invade any country without worrying about casualties or costs

-

The main feature of Dummynation Mod APK Unlimited Troops is unlimited troops, which means you can send as many troops as you want to any country you want to invade. You don't have to worry about losing troops or spending money on them. You can also use different types of troops, such as infantry, tanks, planes, ships, or missiles. This feature gives you a huge advantage over your enemies and makes it easier to conquer the world.

-

How to download and install Dummynation Mod APK Unlimited Troops?

-

Downloading and installing Dummynation Mod APK Unlimited Troops is easy and simple. Just follow these steps:

-

Download the modded APK file from a reliable source

-

The first step is to download the modded APK file from a reliable source online. You can search for Dummynation Mod APK Unlimited Troops on Google or any other search engine and find a link that offers a safe and secure download. Make sure you download the latest version of the mod that is compatible with your device.

-

Enable unknown sources on your device settings

-

The next step is to enable unknown sources on your device settings. This will allow you to install apps that are not from the official app store. To do this, go to your device settings and look for security or privacy options. Then find the option that says unknown sources or allow installation from unknown sources and turn it on.

-

Install the APK file and launch the game

-

The final step is to install the APK file and launch the game. To do this, locate the downloaded APK file on your device storage and tap on it. Follow the instructions on the screen to complete the installation process. Once done, you can launch the game from your app drawer or home screen and enjoy playing Dummynation Mod APK Unlimited Troops.

How to play Dummynation Mod APK Unlimited Troops?

-

Playing Dummynation Mod APK Unlimited Troops is fun and easy. Just follow these steps:

-

Choose a country to start with and customize your leader's name and appearance

-

The first step is to choose a country to start with and customize your leader's name and appearance. You can choose any country in the world, from the USA to China, from Russia to Brazil, from India to Australia. You can also change your leader's name, gender, hair, skin, and clothes. You can make your leader look like yourself, a famous person, or a fictional character. The choice is yours.

-

Use the map to select a target country and send your troops to occupy it

-

The next step is to use the map to select a target country and send your troops to occupy it. You can zoom in and out of the map and see the details of each country, such as its name, flag, population, power, relations, resources, and economy. You can also see the color of each country, which indicates its status: green for allies, red for enemies, yellow for neutral, and blue for yourself. To select a target country, simply tap on it and see its information on the bottom of the screen. To send your troops to occupy it, tap on the attack button and choose the type and number of troops you want to send. You can use unlimited troops, so don't be afraid to send as many as you want.

-

Monitor your power, relations, resources and economy on the dashboard

-

The third step is to monitor your power, relations, resources and economy on the dashboard. The dashboard is located on the top of the screen and shows you important information about your country and the world. You can see your power level, which indicates how strong you are compared to other countries. You can also see your relations with other countries, which indicates how friendly or hostile they are towards you. You can also see your resources, which include food, water, oil, metal, uranium, and money. You can use your resources to sustain your research and military campaigns. You can also see your economy, which includes your income and expenses. You can use your economy to determine your tax rate, trade agreements, subsidies, and tariffs.

-

Use research and policy options to improve your country's performance and influence

-

The fourth step is to use research and policy options to improve your country's performance and influence. You can access these options by tapping on the menu button on the top right corner of the screen. You can then choose between research or policy options. Research options allow you to unlock new technologies that can improve your military, economy, or diplomacy. For example, you can research nuclear weapons that can destroy entire countries in one strike. Policy options allow you to set your country's stance on various issues that can affect your relations with other countries. For example, you can set your policy on human rights that can make you more popular or unpopular among other countries.

Tips and tricks for Dummynation Mod APK Unlimited Troops

-

Playing Dummynation Mod APK Unlimited Troops can be a lot of fun, but also challenging. Here are some tips and tricks to help you conquer the world with ease:

-

Balance your expansion and diplomacy to avoid creating too many enemies

-

While it may be tempting to use your unlimited troops to invade every country you see, you should also consider the consequences of your actions. If you create too many enemies, you may face a coalition of countries that will try to stop you. You may also lose the support of your allies, who may turn against you or abandon you. Therefore, you should balance your expansion and diplomacy to avoid creating too many enemies. You can do this by forming alliances with other countries, respecting their sovereignty, honoring your treaties, and avoiding unnecessary conflicts. You can also use diplomacy to persuade or intimidate other countries to join you or surrender to you.

-

Use your unlimited troops wisely and strategically to overcome stronger opponents

-

Even though you have unlimited troops, you should still use them wisely and strategically to overcome stronger opponents. You should not just send your troops blindly to any country, but rather plan your attacks carefully and choose the best type and number of troops for each situation. You should also consider the terrain, weather, distance, and defense of each country before attacking them. You should also use different types of troops, such as infantry, tanks, planes, ships, or missiles, to exploit the weaknesses of your enemies and gain an advantage over them.

-

Invest in research and economy to gain an edge over your rivals

-

Besides using your unlimited troops, you should also invest in research and economy to gain an edge over your rivals. Research can help you unlock new technologies that can improve your military, economy, or diplomacy. For example, you can research nuclear weapons that can destroy entire countries in one strike, or stealth technology that can make your troops invisible to radar. Economy can help you increase your income and reduce your expenses. For example, you can increase your tax rate, trade agreements, subsidies, or tariffs to boost your revenue, or reduce your military spending, welfare spending, or debt payments to lower your costs.

-

Explore new levels and areas to discover new challenges and rewards

-

Dummynation Mod APK Unlimited Troops offers a lot of variety and replay value by providing different levels and areas to explore. Each level has a different difficulty and objective, such as conquering a continent, a region, or the whole world. Each area has a different theme and design, such as Europe, Asia, Africa, America, or Antarctica. By exploring new levels and areas, you can discover new challenges and rewards that will keep you entertained and motivated.

-

Conclusion

-

Dummynation Mod APK Unlimited Troops is a fun and addictive game that lets you experience the thrill of world domination. The modded version enhances the gameplay by removing ads and adding unlimited troops and other features. The game is easy to download, install and play, and offers hours of entertainment for strategy lovers. If you are looking for a game that will challenge your strategic skills and satisfy your desire for power, then Dummynation Mod APK Unlimited Troops is the game for you.

-

FAQs

-

Is Dummynation Mod APK Unlimited Troops safe to use?

-

Dummynation Mod APK Unlimited Troops is safe to use as long as you download it from a reliable source online. The modded version does not contain any viruses or malware that can harm your device or data. However, you should always be careful when downloading any app from unknown sources and scan it with an antivirus before installing it.

-

What are the benefits of using Dummynation Mod APK Unlimited Troops?

-

The benefits of using Dummynation Mod APK Unlimited Troops are that it removes ads and adds unlimited troops and other features that enhance the gameplay. By using this mod, you can enjoy playing Dummynation without any interruptions or limitations. You can also have more fun and freedom in conquering the world with unlimited troops.

-

How can I update Dummynation Mod APK Unlimited Troops?

-

You can update Dummynation Mod APK Unlimited Troops by downloading the latest version of the mod from a reliable source online. You can then install it over the existing version without losing your progress or data. You should always update the mod whenever there is a new version available to ensure compatibility and performance.

-

Can I play Dummynation Mod APK Unlimited Troops offline?

-

Yes, you can play Dummynation Mod APK Unlimited Troops offline without any internet connection. The game does not require any internet connection to run or save your progress. However, you may need an internet connection to download and install the mod, or to access some online features, such as leaderboards or achievements.

-

Can I share my progress and achievements with other players?

-

Yes, you can share your progress and achievements with other players by using the social media buttons on the game. You can also compare your scores and rankings with other players on the leaderboards or achievements. You can also challenge your friends or other players to see who can conquer the world faster or better.

197e85843d
-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/.md b/spaces/1phancelerku/anime-remove-background/.md deleted file mode 100644 index 4006a4466de3dcff3afbdcd20584f5220ec62af3..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/.md +++ /dev/null @@ -1,44 +0,0 @@ -## Warman Crack With Full Game - - - - - - WORK - - - -**Click Here ✅ [https://vittuv.com/2tBMxo](https://vittuv.com/2tBMxo)** - - - - - - - - - - - - - -Ansys Discovery Student is a cutting-edge product design software for students that leverages our instantaneous simulation technology. It allows you to create and modify geometry models easily with Ansys SpaceClaim technology, which is a direct modeling tool that eliminates the need for complex CAD operations. It also enables you to perform thermal, structural and fluids simulations in real time with completely meshless and interactive solvers. With Ansys Discovery Student, you can explore and understand physics concepts without spending a lot of time learning how to use a complicated simulation tool. - - - -Ansys Discovery Student is ideal for students who want to learn about product design and engineering in a fun and intuitive way. You can experiment with different design scenarios and see how they affect the performance and behavior of your product. You can also compare different physics phenomena and discover how they interact with each other. For example, you can see how heat transfer affects the stress and deformation of a metal part, or how fluid flow affects the aerodynamics and lift of a wing. - - - -Ansys Discovery Student is also a great tool for students who want to prepare for their future careers in engineering and design. You can use it to create impressive projects and portfolios that showcase your skills and creativity. You can also use it to collaborate with your classmates and instructors and get feedback on your work. Ansys Discovery Student is compatible with other Ansys products, so you can easily export your models and simulations to other tools for further analysis and optimization. - - - -Ansys Discovery Student is free to download and use for academic purposes. You can install it on your personal computer or laptop and access it anytime and anywhere. You can also access online tutorials, videos, webinars and community forums to help you get started and learn more about the software. Ansys Discovery Student is the ultimate product design software for students who want to learn by doing and have fun along the way. - - 145887f19f - - - - - diff --git a/spaces/1phancelerku/anime-remove-background/Bus Simulator Indonesia Mod APK A Game that Combines Simulation Adventure and Education.md b/spaces/1phancelerku/anime-remove-background/Bus Simulator Indonesia Mod APK A Game that Combines Simulation Adventure and Education.md deleted file mode 100644 index 9dcc71058a7b7005997d3bb17ae578bb5a22fb63..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Bus Simulator Indonesia Mod APK A Game that Combines Simulation Adventure and Education.md +++ /dev/null @@ -1,101 +0,0 @@ -
-

Game Bus Simulator Indonesia Mod APK: A Fun and Realistic Driving Experience

-

Do you love driving games? Do you want to explore the beautiful and diverse cities of Indonesia? Do you want to customize your own bus and drive it on realistic roads? If you answered yes to any of these questions, then you should try Game Bus Simulator Indonesia Mod APK. This is a modified version of the popular game Bus Simulator Indonesia, which lets you enjoy unlimited money, fuel, and other features that make the game more fun and exciting. In this article, we will tell you everything you need to know about Game Bus Simulator Indonesia Mod APK, including its features, how to download and install it, and its pros and cons.

-

What is Game Bus Simulator Indonesia Mod APK?

-

Game Bus Simulator Indonesia Mod APK is a bus-driving game set in various cities of Indonesia, in which you perform various tasks on your Android phone. In this game, you need to pick up passengers from different areas of an Indonesian city and drop them off at their destination. You can also drive freely around the city and enjoy the scenery. You can choose from different types of buses, such as mini buses, double-decker buses, or luxury buses. You can also customize your bus with different skins, stickers, horns, lights, and more. You can also experience realistic traffic, weather, day and night cycles, and other aspects of driving in Indonesia.

-

game bus simulator indonesia mod apk


Download ☆☆☆ https://jinyurl.com/2uNJO1



-

Features of Game Bus Simulator Indonesia Mod APK

-

- Unlimited money and fuel

-

One of the best features of Game Bus Simulator Indonesia Mod APK is that it gives you unlimited money and fuel. This means that you can buy any bus you want, upgrade it with any accessories you like, and drive it as long as you want without worrying about running out of gas. You can also use the money to unlock new cities, modes, and missions in the game.

-

- Customizable buses and skins

-

Another great feature of Game Bus Simulator Indonesia Mod APK is that it allows you to customize your buses with different skins and accessories. You can change the color, design, logo, name, number plate, and more of your bus. You can also add stickers, horns, lights, mirrors, spoilers, exhausts, and more to your bus. You can make your bus look unique and stylish according to your preference.

-

- Realistic traffic and weather

-

Game Bus Simulator Indonesia Mod APK also offers realistic traffic and weather conditions in the game. You will encounter different types of vehicles on the road, such as cars, trucks, motorcycles, bicycles, rickshaws, etc. You will also have to follow the traffic rules and signals, such as speed limits, stop signs, red lights, etc. You will also experience different weather effects, such as rain, fog, sun, wind, etc. You will have to adjust your driving accordingly to avoid accidents and delays.

-

- Various modes and missions

-

Game Bus Simulator Indonesia Mod APK also provides various modes and missions for you to enjoy. You can choose from free mode, career mode, or multiplayer mode. In free mode, you can drive anywhere you want without any restrictions or objectives. In career mode, you have to complete different tasks and challenges to earn money and reputation. In multiplayer mode, you can play with other players online and compete with them in races or other events. You can also chat with them using the built-in voice chat feature.

-

How to download and install Game Bus Simulator Indonesia Mod APK?

-

Requirements for Game Bus Simulator Indonesia Mod APK

Steps to download and install Game Bus Simulator Indonesia Mod APK

-

If you want to download and install Game Bus Simulator Indonesia Mod APK on your android device, you need to follow these simple steps:

-
    -
  1. Click on the download link to get the Game Bus Simulator Indonesia Mod APK file.
  2. Allow the installation of unknown sources on your device by going to Settings > Security > Unknown Sources.
  3. Locate the downloaded file in your file manager and tap on it to start the installation process.
  4. Follow the instructions on the screen and wait for the installation to complete.
  5. Launch the game and enjoy driving your bus in Indonesia.
-

Pros and cons of Game Bus Simulator Indonesia Mod APK

-

Game Bus Simulator Indonesia Mod APK is a fun and realistic driving game that lets you experience the culture and scenery of Indonesia. However, like any other game, it also has some pros and cons that you should consider before playing it. Here are some of them:

-

Pros of Game Bus Simulator Indonesia Mod APK

- -

Cons of Game Bus Simulator Indonesia Mod APK

- -

Conclusion

-

Game Bus Simulator Indonesia Mod APK is a great game for anyone who loves driving games and wants to explore the beautiful and diverse cities of Indonesia. It offers unlimited money, fuel, customization, realism, variety, and multiplayer features that make the game more fun and exciting. However, it also has some drawbacks, such as compatibility issues, bugs, internet requirements, and lack of updates. Therefore, you should weigh the pros and cons before downloading and installing it on your device. If you are looking for a fun and realistic driving experience in Indonesia, then you should give Game Bus Simulator Indonesia Mod APK a try.

-

FAQs

-

Here are some frequently asked questions about Game Bus Simulator Indonesia Mod APK:

-

game bus simulator indonesia mod apk unlimited money
-game bus simulator indonesia mod apk download latest version
-game bus simulator indonesia mod apk offline
-game bus simulator indonesia mod apk 2021
-game bus simulator indonesia mod apk free shopping
-game bus simulator indonesia mod apk revdl
-game bus simulator indonesia mod apk terbaru
-game bus simulator indonesia mod apk android 1
-game bus simulator indonesia mod apk unlimited fuel
-game bus simulator indonesia mod apk hack
-game bus simulator indonesia mod apk obb
-game bus simulator indonesia mod apk rexdl
-game bus simulator indonesia mod apk no ads
-game bus simulator indonesia mod apk update
-game bus simulator indonesia mod apk full unlocked
-game bus simulator indonesia mod apk unlimited everything
-game bus simulator indonesia mod apk data
-game bus simulator indonesia mod apk pure
-game bus simulator indonesia mod apk happymod
-game bus simulator indonesia mod apk all buses unlocked
-game bus simulator indonesia mod apk cheat
-game bus simulator indonesia mod apk new version
-game bus simulator indonesia mod apk online
-game bus simulator indonesia mod apk an1
-game bus simulator indonesia mod apk unlimited diamond
-game bus simulator indonesia mod apk latest
-game bus simulator indonesia mod apk original
-game bus simulator indonesia mod apk lenov.ru
-game bus simulator indonesia mod apk old version
-game bus simulator indonesia mod apk unlimited coin
-game bus simulator indonesia mod apk versi lama
-game bus simulator indonesia mod apk mega
-game bus simulator indonesia mod apk pro
-game bus simulator indonesia mod apk premium
-game bus simulator indonesia mod apk vip
-game bus simulator indonesia mod apk plus
-game bus simulator indonesia mod apk 2020
-game bus simulator indonesia mod apk android oyun club
-game bus simulator indonesia mod apk andropalace
-game bus simulator indonesia mod apk apkpure.com

-
    -
  1. Is Game Bus Simulator Indonesia Mod APK safe to download and install?

    Yes, Game Bus Simulator Indonesia Mod APK is safe to download and install as long as you get it from a trusted source. However, you should always be careful when downloading any app from unknown sources and scan it with an antivirus before installing it.

  2. What is the difference between Game Bus Simulator Indonesia Mod APK and the original game?

    The main difference between Game Bus Simulator Indonesia Mod APK and the original game is that the modded version gives you unlimited money, fuel, customization, and other features that are not available in the original game. The modded version also bypasses some restrictions or limitations that are imposed by the original game.

  3. Can I play Game Bus Simulator Indonesia Mod APK offline?

    You can play Game Bus Simulator Indonesia Mod APK offline in free mode or career mode. However, you will need an internet connection to play multiplayer mode or access some online features or events.

  4. How can I update Game Bus Simulator Indonesia Mod APK?

    You can update Game Bus Simulator Indonesia Mod APK by downloading and installing the latest version from the same source. However, you should always back up your data before updating to avoid losing your progress or settings.

  5. How can I contact the developers of Game Bus Simulator Indonesia Mod APK?

    You can contact the developers of Game Bus Simulator Indonesia Mod APK by visiting their official website or their social media pages. You can also leave a comment or review on their download page or send them an email at support@maleo.id.

    -

197e85843d
-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download and Install Instagram 4.0 2 APK - The Best Way to Share Your Photos and Videos.md b/spaces/1phancelerku/anime-remove-background/Download and Install Instagram 4.0 2 APK - The Best Way to Share Your Photos and Videos.md deleted file mode 100644 index e58f17d6199b48b5cad383c40fe66d11b2974df9..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download and Install Instagram 4.0 2 APK - The Best Way to Share Your Photos and Videos.md +++ /dev/null @@ -1,127 +0,0 @@ -
-

Download Instagram 4.0 2 APK: How to Get the Latest Version of the Popular Social Media App

-

Do you love sharing your photos, videos, stories, reels, and more with your friends and followers on Instagram? Do you want to get the latest features and updates of the app without waiting for the official release on Google Play Store? If yes, then you might be interested in downloading Instagram 4.0 2 APK, which is the latest version of the app as of June 2023. In this article, we will explain what Instagram is, what an APK file is, and how to download and install Instagram 4.0 2 APK on your Android device.

-

download instagram 4.0 2 apk


DOWNLOAD > https://jinyurl.com/2uNPFR



-

What is Instagram and why do you need it?

-

Instagram is one of the most popular social media apps in the world, with over one billion monthly active users. It allows you to create and share your photos, videos, stories, reels, live broadcasts, IGTV videos, and more with the people you care about. You can also discover new content from other users, celebrities, brands, and influencers that match your interests. You can also chat with your friends, send voice messages, video calls, stickers, GIFs, and more through Instagram Direct. You can also shop for products, watch videos, play games, and access other apps through Instagram.

-

Instagram features and benefits

-

Some of the features and benefits of using Instagram are:

- -

Instagram requirements and compatibility

-

To use Instagram on your Android device, you need to have:

-

download instagram 4.0 2 apk for android
-download instagram 4.0 2 apk latest version
-download instagram 4.0 2 apk free
-download instagram 4.0 2 apk mod
-download instagram 4.0 2 apk old version
-download instagram 4.0 2 apk file
-download instagram 4.0 2 apk from google play
-download instagram 4.0 2 apk update
-download instagram 4.0 2 apk beta
-download instagram 4.0 2 apk mirror
-download instagram 4.0 2 apk offline
-download instagram 4.0 2 apk cracked
-download instagram 4.0 2 apk hack
-download instagram 4.0 2 apk no ads
-download instagram 4.0 2 apk premium
-download instagram 4.0 2 apk pro
-download instagram 4.0 2 apk full
-download instagram 4.0 2 apk unlocked
-download instagram 4.0 2 apk original
-download instagram 4.0 2 apk safe
-download instagram 4.0 2 apk direct link
-download instagram 4.0 2 apk for pc
-download instagram 4.0 2 apk for ios
-download instagram 4.0 2 apk for windows
-download instagram 4.0 2 apk for mac
-download instagram 4.0 2 apk for tablet
-download instagram 4.0 2 apk for firestick
-download instagram 4.0 2 apk for smart tv
-download instagram 4.0 2 apk for chromebook
-download instagram 4.0 2 apk for huawei
-download instagram 4.0 2 apk for samsung
-download instagram 4.0 2 apk for xiaomi
-download instagram 4.0 2 apk for oppo
-download instagram 4.0 2 apk for vivo
-download instagram 4.0 2 apk for nokia
-download instagram 4.0 2 apk for lg
-download instagram 4.0 2 apk for sony
-download instagram 4.0 2 apk for oneplus
-download instagram

- -

What is an APK file and why do you need it?

-

An APK file is an Android Package Kit file that contains all the files and code needed to install an app on an Android device. It is similar to an EXE file on Windows or a DMG file on Mac. You can download APK files from various sources online, such as websites, blogs, forums, or app stores. However, not all APK files are safe or reliable. Some may contain malware or viruses that can harm your device or steal your personal information. Therefore, you need to be careful when downloading APK files from unknown sources.
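Under the hood, an APK is just a ZIP archive with a fixed layout (AndroidManifest.xml, classes.dex, resources, and signature files under META-INF/), so you can peek inside one before installing it. The following is a rough, illustrative Python sketch; the file name is a placeholder, not an actual Instagram package:

```python
import zipfile

# Hypothetical path; any downloaded APK can be inspected the same way.
apk_path = "example.apk"

with zipfile.ZipFile(apk_path) as apk:
    # An APK always carries a manifest, compiled dex code, and signature files.
    for name in apk.namelist():
        if name in ("AndroidManifest.xml", "classes.dex") or name.startswith("META-INF/"):
            info = apk.getinfo(name)
            print(f"{name}: {info.file_size} bytes")
```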

-

AP

APK file definition and advantages

-

An APK file is an Android Package Kit file that contains all the files and code needed to install an app on an Android device. It is similar to an EXE file on Windows or a DMG file on Mac. You can download APK files from various sources online, such as websites, blogs, forums, or app stores. However, not all APK files are safe or reliable. Some may contain malware or viruses that can harm your device or steal your personal information. Therefore, you need to be careful when downloading APK files from unknown sources.

-

Some of the advantages of using APK files are:

- -

APK file risks and precautions

-

Some of the risks and precautions of using APK files are:

- -

How to download Instagram 4.0 2 APK?

-

If you want to download Instagram 4.0 2 APK, you need to follow these steps:

-

Step 1: Enable unknown sources on your device

-

Before you can install any APK file on your device, you need to enable unknown sources in your settings. This will allow you to install apps from sources other than Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on. You may see a warning message that says installing apps from unknown sources may harm your device. Tap OK to proceed.

-

Step 2: Find a reliable source for the APK file

-

The next step is to find a reliable source for the Instagram 4.0 2 APK file. You can search online for websites, blogs, forums, or app stores that offer the APK file. However, be careful not to download from shady or untrustworthy sites that may contain malware or viruses. You can also check the reviews, ratings, comments, and feedback from other users who have downloaded the APK file before. You can also scan the APK file with an antivirus app before installing it.
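If the site you download from publishes a checksum for the file (an assumption, since not every site does), one simple extra check is to compare the SHA-256 hash of your download against it. A rough Python sketch, with a placeholder file name and hash:

```python
import hashlib

apk_path = "instagram-4.0-2.apk"  # hypothetical file name
expected = "0123abcd..."          # hypothetical checksum copied from the download page

sha256 = hashlib.sha256()
with open(apk_path, "rb") as f:
    # Read in chunks so large files do not have to fit in memory.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)

print("match" if sha256.hexdigest() == expected else "MISMATCH - do not install")
```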

-

Step 3: Download and install the APK file

-

Once you have found a reliable source for the Instagram 4.0 2 APK file, you can download it to your device. You may need to grant permission for the browser or app to download the file. After the download is complete, you can open the file and tap Install. You may see a message that says installing this app may harm your device. Tap Install Anyway to continue. Wait for the installation process to finish.

-

Step 4: Launch and enjoy Instagram 4.0 2

-

After the installation is done, you can launch Instagram 4.0 2 from your app drawer or home screen. You can sign in with your existing account or create a new one if you don't have one yet. You can then enjoy all the features and updates of Instagram 4.0 2 on your device.

-

Conclusion

-

In this article, we have explained what Instagram is, what an APK file is, and how to download and install Instagram 4.0 2 APK on your Android device. We hope this article has been helpful and informative for you. If you have any questions or feedback, please feel free to leave a comment below.

-

FAQs

-

Here are some frequently asked questions about Instagram 4.0 2 APK:

-
    -
  1. Is Instagram 4.0 2 APK safe?

    Instagram 4.0 2 APK is safe as long as you download it from a reliable source and scan it with an antivirus app before installing it. However, there is always a risk of downloading fake or malicious apps from unknown sources, so be careful and use your own discretion.

  2. What are the new features of Instagram 4.0 2 APK?

    Instagram 4.0 2 APK has some new features and improvements, such as:

  3. How to update Instagram 4.0 2 APK?

    To update Instagram 4.0 2 APK, you need to download the latest version of the APK file from a reliable source and install it on your device. You may need to uninstall the previous version of the app before installing the new one. Alternatively, you can wait for the official update on Google Play Store, which may take some time to be available.

  4. How to uninstall Instagram 4.0 2 APK?

    To uninstall Instagram 4.0 2 APK, you need to go to Settings > Apps > Instagram and tap Uninstall. You may also need to delete the APK file from your device storage. If you want to reinstall the app, you can download it from Google Play Store or another source.

  5. How to contact Instagram support?

    If you have any issues or problems with Instagram, you can contact Instagram support through the following ways:

    -

    401be4b1e0
    -
    -
    \ No newline at end of file diff --git a/spaces/2ndelement/voicevox/voicevox_engine/cancellable_engine.py b/spaces/2ndelement/voicevox/voicevox_engine/cancellable_engine.py deleted file mode 100644 index 1bedb3ff3ebce858d8c585cf8b0d121a4d816210..0000000000000000000000000000000000000000 --- a/spaces/2ndelement/voicevox/voicevox_engine/cancellable_engine.py +++ /dev/null @@ -1,220 +0,0 @@ -import argparse -import asyncio -import queue -from multiprocessing import Pipe, Process -from multiprocessing.connection import Connection -from tempfile import NamedTemporaryFile -from typing import List, Optional, Tuple - -import soundfile - -# FIXME: remove FastAPI dependency -from fastapi import HTTPException, Request - -from .model import AudioQuery -from .synthesis_engine import make_synthesis_engines -from .utility import get_latest_core_version - - -class CancellableEngine: - """ - 音声合成のキャンセル機能に関するクラス - 初期化後は、synthesis関数で音声合成できる - (オリジナルと比べ引数が増えているので注意) - - Attributes - ---------- - watch_con_list: List[Tuple[Request, Process]] - Requestは接続の監視に使用され、Processは通信切断時のプロセスキルに使用される - クライアントから接続があるとListにTupleが追加される - 接続が切断、もしくは音声合成が終了すると削除される - procs_and_cons: queue.Queue[Tuple[Process, Connection]] - 音声合成の準備が終わっているプロセスのList - (音声合成中のプロセスは入っていない) - """ - - def __init__(self, args: argparse.Namespace) -> None: - """ - 変数の初期化を行う - また、args.init_processesの数だけプロセスを起動し、procs_and_consに格納する - """ - self.args = args - if not self.args.enable_cancellable_synthesis: - raise HTTPException( - status_code=404, - detail="実験的機能はデフォルトで無効になっています。使用するには引数を指定してください。", - ) - - self.watch_con_list: List[Tuple[Request, Process]] = [] - self.procs_and_cons: queue.Queue[Tuple[Process, Connection]] = queue.Queue() - for _ in range(self.args.init_processes): - self.procs_and_cons.put(self.start_new_proc()) - - def start_new_proc( - self, - ) -> Tuple[Process, Connection]: - """ - 新しく開始したプロセスを返す関数 - - Returns - ------- - ret_proc: Process - 新規のプロセス - sub_proc_con1: Connection - ret_procのプロセスと通信するためのPipe - """ - sub_proc_con1, sub_proc_con2 = Pipe(True) - ret_proc = Process( - target=start_synthesis_subprocess, - kwargs={ - "args": self.args, - "sub_proc_con": sub_proc_con2, - }, - daemon=True, - ) - ret_proc.start() - return ret_proc, sub_proc_con1 - - def finalize_con( - self, - req: Request, - proc: Process, - sub_proc_con: Optional[Connection], - ) -> None: - """ - 接続が切断された時の処理を行う関数 - watch_con_listからの削除、プロセスの後処理を行う - プロセスが生きている場合はそのままprocs_and_consに加える - 死んでいる場合は新しく生成したものをprocs_and_consに加える - - Parameters - ---------- - req: fastapi.Request - 接続確立時に受け取ったものをそのまま渡せばよい - https://fastapi.tiangolo.com/advanced/using-request-directly/ - proc: Process - 音声合成を行っていたプロセス - sub_proc_con: Connection, optional - 音声合成を行っていたプロセスとのPipe - 指定されていない場合、プロセスは再利用されず終了される - """ - try: - self.watch_con_list.remove((req, proc)) - except ValueError: - pass - try: - if not proc.is_alive() or sub_proc_con is None: - proc.close() - raise ValueError - # プロセスが死んでいない場合は再利用する - self.procs_and_cons.put((proc, sub_proc_con)) - except ValueError: - # プロセスが死んでいるので新しく作り直す - self.procs_and_cons.put(self.start_new_proc()) - - def _synthesis_impl( - self, - query: AudioQuery, - speaker_id: int, - request: Request, - core_version: Optional[str], - ) -> str: - """ - 音声合成を行う関数 - 通常エンジンの引数に比べ、requestが必要になっている - また、返り値がファイル名になっている - - Parameters - ---------- - query: AudioQuery - speaker_id: int - request: fastapi.Request - 接続確立時に受け取ったものをそのまま渡せばよい - https://fastapi.tiangolo.com/advanced/using-request-directly/ - core_version: str - - Returns - ------- - f_name: str - 
生成された音声ファイルの名前 - """ - proc, sub_proc_con1 = self.procs_and_cons.get() - self.watch_con_list.append((request, proc)) - try: - sub_proc_con1.send((query, speaker_id, core_version)) - f_name = sub_proc_con1.recv() - except EOFError: - raise HTTPException(status_code=422, detail="既にサブプロセスは終了されています") - except Exception: - self.finalize_con(request, proc, sub_proc_con1) - raise - - self.finalize_con(request, proc, sub_proc_con1) - return f_name - - async def catch_disconnection(self): - """ - 接続監視を行うコルーチン - """ - while True: - await asyncio.sleep(1) - for con in self.watch_con_list: - req, proc = con - if await req.is_disconnected(): - try: - if proc.is_alive(): - proc.terminate() - proc.join() - proc.close() - except ValueError: - pass - finally: - self.finalize_con(req, proc, None) - - -def start_synthesis_subprocess( - args: argparse.Namespace, - sub_proc_con: Connection, -): - """ - 音声合成を行うサブプロセスで行うための関数 - pickle化の関係でグローバルに書いている - - Parameters - ---------- - args: argparse.Namespace - 起動時に作られたものをそのまま渡す - sub_proc_con: Connection - メインプロセスと通信するためのPipe - """ - - synthesis_engines = make_synthesis_engines( - use_gpu=args.use_gpu, - voicelib_dirs=args.voicelib_dir, - voicevox_dir=args.voicevox_dir, - runtime_dirs=args.runtime_dir, - cpu_num_threads=args.cpu_num_threads, - enable_mock=args.enable_mock, - ) - assert len(synthesis_engines) != 0, "音声合成エンジンがありません。" - latest_core_version = get_latest_core_version(versions=synthesis_engines.keys()) - while True: - try: - query, speaker_id, core_version = sub_proc_con.recv() - if core_version is None: - _engine = synthesis_engines[latest_core_version] - elif core_version in synthesis_engines: - _engine = synthesis_engines[core_version] - else: - # バージョンが見つからないエラー - sub_proc_con.send("") - continue - wave = _engine._synthesis_impl(query, speaker_id) - with NamedTemporaryFile(delete=False) as f: - soundfile.write( - file=f, data=wave, samplerate=query.outputSamplingRate, format="WAV" - ) - sub_proc_con.send(f.name) - except Exception: - sub_proc_con.close() - raise diff --git a/spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/README.md b/spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/README.md deleted file mode 100644 index c6fd17d778a9f9dbe7bf632c92e40e36e67b91d2..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/README.md +++ /dev/null @@ -1,19 +0,0 @@ -# Utils - -Scripts in this directory are used as utility functions. - -## BERT Pretrained Embeddings - -You can load pretrained word embeddings in Google [BERT](https://github.com/google-research/bert#pre-trained-models) instead of training word embeddings from scratch. The scripts in `utils/bert` need a BERT server in the background. We use BERT server from [bert-as-service](https://github.com/hanxiao/bert-as-service). - -To use bert-as-service, you need to first install the repository. It is recommended that you create a new environment with Tensorflow 1.3 to run BERT server since it is incompatible with Tensorflow 2.x. - -After successful installation of [bert-as-service](https://github.com/hanxiao/bert-as-service), downloading and running the BERT server needs to execute: - -```bash -bash scripts/prepare_bert_server.sh zh -``` - -By default, server based on BERT base Chinese model is running in the background. You can change to other models by changing corresponding model name and path in `scripts/prepare_bert_server.sh`. - -To extract BERT word embeddings, you need to execute `utils/bert/create_word_embedding.py`. 
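For context, bert-as-service exposes the running server through the `bert-serving-client` package, and the extraction script presumably queries it in roughly the following way. This is a sketch of the client API as documented by bert-as-service, not a copy of `utils/bert/create_word_embedding.py`:

```python
# pip install bert-serving-client
# The server must already be running, e.g. started via
# scripts/prepare_bert_server.sh as described above.
from bert_serving.client import BertClient

bc = BertClient()  # assumes the server runs on localhost with default ports
vectors = bc.encode(["这是一个句子", "another sentence"])
print(vectors.shape)  # (num_sentences, embedding_dim), e.g. (2, 768) for BERT base
```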
diff --git a/spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/openai.py b/spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/openai.py deleted file mode 100644 index 9911b6e135e51970177fcac067c12192b0b57c1c..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/openai.py +++ /dev/null @@ -1,129 +0,0 @@ -""" OpenAI pretrained model functions - -Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI. -""" - -import os -import warnings -from typing import Union, List - -import torch - -from .model import build_model_from_openai_state_dict -from .pretrained import get_pretrained_url, list_pretrained_tag_models, download_pretrained - -__all__ = ["list_openai_models", "load_openai_model"] - - -def list_openai_models() -> List[str]: - """Returns the names of available CLIP models""" - return list_pretrained_tag_models('openai') - - -def load_openai_model( - name: str, - model_cfg, - device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", - jit=True, - cache_dir=os.path.expanduser("~/.cache/clip"), - enable_fusion: bool = False, - fusion_type: str = 'None' -): - """Load a CLIP model, preserve its text pretrained part, and set in the CLAP model - - Parameters - ---------- - name : str - A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict - device : Union[str, torch.device] - The device to put the loaded model - jit : bool - Whether to load the optimized JIT model (default) or more hackable non-JIT model. - - Returns - ------- - model : torch.nn.Module - The CLAP model - preprocess : Callable[[PIL.Image], torch.Tensor] - A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input - """ - if get_pretrained_url(name, 'openai'): - model_path = download_pretrained(get_pretrained_url(name, 'openai'), root=cache_dir) - elif os.path.isfile(name): - model_path = name - else: - raise RuntimeError(f"Model {name} not found; available models = {list_openai_models()}") - - try: - # loading JIT archive - model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval() - state_dict = None - except RuntimeError: - # loading saved state dict - if jit: - warnings.warn(f"File {model_path} is not a JIT archive. 
Loading as a state dict instead") - jit = False - state_dict = torch.load(model_path, map_location="cpu") - - if not jit: - try: - model = build_model_from_openai_state_dict(state_dict or model.state_dict(), model_cfg, enable_fusion, fusion_type).to(device) - except KeyError: - sd = {k[7:]: v for k, v in state_dict["state_dict"].items()} - model = build_model_from_openai_state_dict(sd, model_cfg, enable_fusion, fusion_type).to(device) - - if str(device) == "cpu": - model.float() - return model - - # patch the device names - device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[]) - device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1] - - def patch_device(module): - try: - graphs = [module.graph] if hasattr(module, "graph") else [] - except RuntimeError: - graphs = [] - - if hasattr(module, "forward1"): - graphs.append(module.forward1.graph) - - for graph in graphs: - for node in graph.findAllNodes("prim::Constant"): - if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"): - node.copyAttributes(device_node) - - model.apply(patch_device) - patch_device(model.encode_audio) - patch_device(model.encode_text) - - # patch dtype to float32 on CPU - if str(device) == "cpu": - float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[]) - float_input = list(float_holder.graph.findNode("aten::to").inputs())[1] - float_node = float_input.node() - - def patch_float(module): - try: - graphs = [module.graph] if hasattr(module, "graph") else [] - except RuntimeError: - graphs = [] - - if hasattr(module, "forward1"): - graphs.append(module.forward1.graph) - - for graph in graphs: - for node in graph.findAllNodes("aten::to"): - inputs = list(node.inputs()) - for i in [1, 2]: # dtype can be the second or third argument to aten::to() - if inputs[i].node()["value"] == 5: - inputs[i].node().copyAttributes(float_node) - - model.apply(patch_float) - patch_float(model.encode_audio) - patch_float(model.encode_text) - model.float() - - model.audio_branch.audio_length = model.audio_cfg.audio_length - return model diff --git a/spaces/Abdllh/poetry2023/README.md b/spaces/Abdllh/poetry2023/README.md deleted file mode 100644 index fa5c6ad64f181ff6051745354b4af489527806f1..0000000000000000000000000000000000000000 --- a/spaces/Abdllh/poetry2023/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Poetry2023 -emoji: 👁 -colorFrom: green -colorTo: gray -sdk: gradio -sdk_version: 3.16.0 -app_file: app.py -pinned: false -duplicated_from: aaaaaabbbbbbbdddddddduuuuulllll/poetry2023 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/lzstring-plugin.d.ts b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/lzstring-plugin.d.ts deleted file mode 100644 index b5320db02fb83b864997d0a125a06e76d586a604..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/lzstring-plugin.d.ts +++ /dev/null @@ -1,8 +0,0 @@ -import LZString from './lzstring'; - -export default class LZStringPlugin extends Phaser.Plugins.BasePlugin { - add( - config?: LZString.IConfig - ): LZString; - -} \ No newline at end of file diff --git a/spaces/AlekseyKorshuk/thin-plate-spline-motion-model/README.md b/spaces/AlekseyKorshuk/thin-plate-spline-motion-model/README.md deleted file mode 100644 index 
76c690992c0a4ee15b2247436a306375a62c61d3..0000000000000000000000000000000000000000 --- a/spaces/AlekseyKorshuk/thin-plate-spline-motion-model/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Thin Plate Spline Motion Model -emoji: 💩 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 2.9.4 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/AlexWang/lama/app.py b/spaces/AlexWang/lama/app.py deleted file mode 100644 index cd0e6aa3eaecdf05c2304ed9aaab3fc068fa2d23..0000000000000000000000000000000000000000 --- a/spaces/AlexWang/lama/app.py +++ /dev/null @@ -1,49 +0,0 @@ -import os -os.system("wget https://huggingface.co/akhaliq/lama/resolve/main/best.ckpt") -os.system("pip install imageio") -os.system("pip install albumentations==0.5.2") -import cv2 -import paddlehub as hub -import gradio as gr -import torch -from PIL import Image, ImageOps -import numpy as np -import imageio -os.mkdir("data") -os.rename("best.ckpt", "models/best.ckpt") -os.mkdir("dataout") -model = hub.Module(name='U2Net') - - -def infer(img, mask, option): - print(type(img["image"]), img["image"].shape) - imageio.imwrite("./data/data.png", img["image"]) - if option == "Upload": - imageio.imwrite("./data/data_mask.png", mask) - elif option == "Automatic (U2net)": - result = model.Segmentation( - images=[cv2.cvtColor(img["image"], cv2.COLOR_RGB2BGR)], - paths=None, - batch_size=1, - input_size=320, - output_dir='output', - visualization=True) - im = Image.fromarray(result[0]['mask']) - im.save("./data/data_mask.png") - else: - imageio.imwrite("./data/data_mask.png", img["mask"]) - os.system('python predict.py model.path=/home/user/app/ indir=/home/user/app/data/ outdir=/home/user/app/dataout/ device=cpu') - return "./dataout/data_mask.png", "./data/data_mask.png" - - -inputs = [gr.Image(tool="sketch", label="Input", type="numpy"), - gr.Image(label="Mask", type="numpy"), - gr.inputs.Radio(choices=["Upload", "Manual", "Automatic (U2net)"], - type="value", default="Upload", label="Masking option")] -outputs = [gr.outputs.Image(type="file", label="output"), - gr.outputs.Image(type="file", label="Mask")] -title = "LaMa Image Inpainting" -description = "Gradio demo for LaMa: Resolution-robust Large Mask Inpainting with Fourier Convolutions. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Masks are generated by U^2net" -article = "
    Resolution-robust Large Mask Inpainting with Fourier Convolutions | Github Repo
    " -gr.Interface(infer, inputs, outputs, title=title, - description=description, article=article).launch() diff --git a/spaces/Amrrs/DragGan-Inversion/gen_images.py b/spaces/Amrrs/DragGan-Inversion/gen_images.py deleted file mode 100644 index 996bc12f4cde6ee9d0076446250ed076a04b2641..0000000000000000000000000000000000000000 --- a/spaces/Amrrs/DragGan-Inversion/gen_images.py +++ /dev/null @@ -1,160 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Generate images using pretrained network pickle.""" - -import os -import re -from typing import List, Optional, Tuple, Union - -import click -import dnnlib -import numpy as np -import PIL.Image -import torch - -import legacy - -# ---------------------------------------------------------------------------- - - -def parse_range(s: Union[str, List]) -> List[int]: - '''Parse a comma separated list of numbers or ranges and return a list of ints. - - Example: '1,2,5-10' returns [1, 2, 5, 6, 7] - ''' - if isinstance(s, list): - return s - ranges = [] - range_re = re.compile(r'^(\d+)-(\d+)$') - for p in s.split(','): - m = range_re.match(p) - if m: - ranges.extend(range(int(m.group(1)), int(m.group(2))+1)) - else: - ranges.append(int(p)) - return ranges - -# ---------------------------------------------------------------------------- - - -def parse_vec2(s: Union[str, Tuple[float, float]]) -> Tuple[float, float]: - '''Parse a floating point 2-vector of syntax 'a,b'. - - Example: - '0,1' returns (0,1) - ''' - if isinstance(s, tuple): - return s - parts = s.split(',') - if len(parts) == 2: - return (float(parts[0]), float(parts[1])) - raise ValueError(f'cannot parse 2-vector {s}') - -# ---------------------------------------------------------------------------- - - -def make_transform(translate: Tuple[float, float], angle: float): - m = np.eye(3) - s = np.sin(angle/360.0*np.pi*2) - c = np.cos(angle/360.0*np.pi*2) - m[0][0] = c - m[0][1] = s - m[0][2] = translate[0] - m[1][0] = -s - m[1][1] = c - m[1][2] = translate[1] - return m - -# ---------------------------------------------------------------------------- - - -@click.command() -@click.option('--network', 'network_pkl', help='Network pickle filename', required=True) -@click.option('--seeds', type=parse_range, help='List of random seeds (e.g., \'0,1,4-6\')', required=True) -@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True) -@click.option('--class', 'class_idx', type=int, help='Class label (unconditional if not specified)') -@click.option('--noise-mode', help='Noise mode', type=click.Choice(['const', 'random', 'none']), default='const', show_default=True) -@click.option('--translate', help='Translate XY-coordinate (e.g. 
\'0.3,1\')', type=parse_vec2, default='0,0', show_default=True, metavar='VEC2') -@click.option('--rotate', help='Rotation angle in degrees', type=float, default=0, show_default=True, metavar='ANGLE') -@click.option('--outdir', help='Where to save the output images', type=str, required=True, metavar='DIR') -def generate_images( - network_pkl: str, - seeds: List[int], - truncation_psi: float, - noise_mode: str, - outdir: str, - translate: Tuple[float, float], - rotate: float, - class_idx: Optional[int] -): - """Generate images using pretrained network pickle. - - Examples: - - \b - # Generate an image using pre-trained AFHQv2 model ("Ours" in Figure 1, left). - python gen_images.py --outdir=out --trunc=1 --seeds=2 \\ - --network=https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-r-afhqv2-512x512.pkl - - \b - # Generate uncurated images with truncation using the MetFaces-U dataset - python gen_images.py --outdir=out --trunc=0.7 --seeds=600-605 \\ - --network=https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-t-metfacesu-1024x1024.pkl - """ - - print('Loading networks from "%s"...' % network_pkl) - device = torch.device('cuda') - with dnnlib.util.open_url(network_pkl) as f: - G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore - # import pickle - # G = legacy.load_network_pkl(f) - # output = open('checkpoints/stylegan2-car-config-f-pt.pkl', 'wb') - # pickle.dump(G, output) - - os.makedirs(outdir, exist_ok=True) - - # Labels. - label = torch.zeros([1, G.c_dim], device=device) - if G.c_dim != 0: - if class_idx is None: - raise click.ClickException( - 'Must specify class label with --class when using a conditional network') - label[:, class_idx] = 1 - else: - if class_idx is not None: - print('warn: --class=lbl ignored when running on an unconditional network') - - # Generate images. - for seed_idx, seed in enumerate(seeds): - print('Generating image for seed %d (%d/%d) ...' % - (seed, seed_idx, len(seeds))) - z = torch.from_numpy(np.random.RandomState( - seed).randn(1, G.z_dim)).to(device) - - # Construct an inverse rotation/translation matrix and pass to the generator. The - # generator expects this matrix as an inverse to avoid potentially failing numerical - # operations in the network. - if hasattr(G.synthesis, 'input'): - m = make_transform(translate, rotate) - m = np.linalg.inv(m) - G.synthesis.input.transform.copy_(torch.from_numpy(m)) - - img = G(z, label, truncation_psi=truncation_psi, noise_mode=noise_mode) - img = (img.permute(0, 2, 3, 1) * 127.5 + - 128).clamp(0, 255).to(torch.uint8) - PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save( - f'{outdir}/seed{seed:04d}.png') - - -# ---------------------------------------------------------------------------- - -if __name__ == "__main__": - generate_images() # pylint: disable=no-value-for-parameter - -# ---------------------------------------------------------------------------- diff --git a/spaces/Amrrs/DragGan-Inversion/stylegan_human/utils/util.py b/spaces/Amrrs/DragGan-Inversion/stylegan_human/utils/util.py deleted file mode 100644 index 544c94895dfc0bfcd1285fde7cd2c102b71113ed..0000000000000000000000000000000000000000 --- a/spaces/Amrrs/DragGan-Inversion/stylegan_human/utils/util.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright (c) SenseTime Research. All rights reserved. 
- -import torch -import cv2 -from torchvision import transforms -import numpy as np -import math - - -def visual(output, out_path): - output = (output + 1)/2 - output = torch.clamp(output, 0, 1) - if output.shape[1] == 1: - output = torch.cat([output, output, output], 1) - output = output[0].detach().cpu().permute(1, 2, 0).numpy() - output = (output*255).astype(np.uint8) - output = output[:, :, ::-1] - cv2.imwrite(out_path, output) - - -def get_lr(t, initial_lr, rampdown=0.25, rampup=0.05): - - lr_ramp = min(1, (1 - t) / rampdown) - lr_ramp = 0.5 - 0.5 * math.cos(lr_ramp * math.pi) - lr_ramp = lr_ramp * min(1, t / rampup) - return initial_lr * lr_ramp - - -def latent_noise(latent, strength): - noise = torch.randn_like(latent) * strength - - return latent + noise - - -def noise_regularize_(noises): - loss = 0 - - for noise in noises: - size = noise.shape[2] - - while True: - loss = ( - loss - + (noise * torch.roll(noise, shifts=1, dims=3)).mean().pow(2) - + (noise * torch.roll(noise, shifts=1, dims=2)).mean().pow(2) - ) - - if size <= 8: - break - - noise = noise.reshape([-1, 1, size // 2, 2, size // 2, 2]) - noise = noise.mean([3, 5]) - size //= 2 - - return loss - - -def noise_normalize_(noises): - for noise in noises: - mean = noise.mean() - std = noise.std() - - noise.data.add_(-mean).div_(std) - - -def tensor_to_numpy(x): - x = x[0].permute(1, 2, 0) - x = torch.clamp(x, -1, 1) - x = (x+1) * 127.5 - x = x.cpu().detach().numpy().astype(np.uint8) - return x - - -def numpy_to_tensor(x): - x = (x / 255 - 0.5) * 2 - x = torch.from_numpy(x).unsqueeze(0).permute(0, 3, 1, 2) - x = x.cuda().float() - return x - - -def tensor_to_pil(x): - x = torch.clamp(x, -1, 1) - x = (x+1) * 127.5 - return transforms.ToPILImage()(x.squeeze_(0)) diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/custom_diffusion/README.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/custom_diffusion/README.md deleted file mode 100644 index 9e3c387e3d342c270fa72b22643ba7bd7548095e..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/custom_diffusion/README.md +++ /dev/null @@ -1,280 +0,0 @@ -# Custom Diffusion training example - -[Custom Diffusion](https://arxiv.org/abs/2212.04488) is a method to customize text-to-image models like Stable Diffusion given just a few (4~5) images of a subject. -The `train_custom_diffusion.py` script shows how to implement the training procedure and adapt it for stable diffusion. - -## Running locally with PyTorch - -### Installing the dependencies - -Before running the scripts, make sure to install the library's training dependencies: - -**Important** - -To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment: - -```bash -git clone https://github.com/huggingface/diffusers -cd diffusers -pip install -e . 
-``` - -Then cd in the example folder and run - -```bash -pip install -r requirements.txt -pip install clip-retrieval -``` - -And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with: - -```bash -accelerate config -``` - -Or for a default accelerate configuration without answering questions about your environment - -```bash -accelerate config default -``` - -Or if your environment doesn't support an interactive shell e.g. a notebook - -```python -from accelerate.utils import write_basic_config -write_basic_config() -``` -### Cat example 😺 - -Now let's get our dataset. Download dataset from [here](https://www.cs.cmu.edu/~custom-diffusion/assets/data.zip) and unzip it. - -We also collect 200 real images using `clip-retrieval` which are combined with the target images in the training dataset as a regularization. This prevents overfitting to the the given target image. The following flags enable the regularization `with_prior_preservation`, `real_prior` with `prior_loss_weight=1.`. -The `class_prompt` should be the category name same as target image. The collected real images are with text captions similar to the `class_prompt`. The retrieved image are saved in `class_data_dir`. You can disable `real_prior` to use generated images as regularization. To collect the real images use this command first before training. - -```bash -pip install clip-retrieval -python retrieve.py --class_prompt cat --class_data_dir real_reg/samples_cat --num_class_images 200 -``` - -**___Note: Change the `resolution` to 768 if you are using the [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 model.___** - -```bash -export MODEL_NAME="CompVis/stable-diffusion-v1-4" -export OUTPUT_DIR="path-to-save-model" -export INSTANCE_DIR="./data/cat" - -accelerate launch train_custom_diffusion.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --instance_data_dir=$INSTANCE_DIR \ - --output_dir=$OUTPUT_DIR \ - --class_data_dir=./real_reg/samples_cat/ \ - --with_prior_preservation --real_prior --prior_loss_weight=1.0 \ - --class_prompt="cat" --num_class_images=200 \ - --instance_prompt="photo of a cat" \ - --resolution=512 \ - --train_batch_size=2 \ - --learning_rate=1e-5 \ - --lr_warmup_steps=0 \ - --max_train_steps=250 \ - --scale_lr --hflip \ - --modifier_token "" -``` - -**Use `--enable_xformers_memory_efficient_attention` for faster training with lower VRAM requirement (16GB per GPU). Follow [this guide](https://github.com/facebookresearch/xformers) for installation instructions.** - -To track your experiments using Weights and Biases (`wandb`) and to save intermediate results (whcih we HIGHLY recommend), follow these steps: - -* Install `wandb`: `pip install wandb`. -* Authorize: `wandb login`. -* Then specify a `validation_prompt` and set `report_to` to `wandb` while launching training. 
You can also configure the following related arguments: - * `num_validation_images` - * `validation_steps` - -Here is an example command: - -```bash -accelerate launch train_custom_diffusion.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --instance_data_dir=$INSTANCE_DIR \ - --output_dir=$OUTPUT_DIR \ - --class_data_dir=./real_reg/samples_cat/ \ - --with_prior_preservation --real_prior --prior_loss_weight=1.0 \ - --class_prompt="cat" --num_class_images=200 \ - --instance_prompt="photo of a cat" \ - --resolution=512 \ - --train_batch_size=2 \ - --learning_rate=1e-5 \ - --lr_warmup_steps=0 \ - --max_train_steps=250 \ - --scale_lr --hflip \ - --modifier_token "" \ - --validation_prompt=" cat sitting in a bucket" \ - --report_to="wandb" -``` - -Here is an example [Weights and Biases page](https://wandb.ai/sayakpaul/custom-diffusion/runs/26ghrcau) where you can check out the intermediate results along with other training details. - -If you specify `--push_to_hub`, the learned parameters will be pushed to a repository on the Hugging Face Hub. Here is an [example repository](https://huggingface.co/sayakpaul/custom-diffusion-cat). - -### Training on multiple concepts 🐱🪵 - -Provide a [json](https://github.com/adobe-research/custom-diffusion/blob/main/assets/concept_list.json) file with the info about each concept, similar to [this](https://github.com/ShivamShrirao/diffusers/blob/main/examples/dreambooth/train_dreambooth.py). - -To collect the real images run this command for each concept in the json file. - -```bash -pip install clip-retrieval -python retrieve.py --class_prompt {} --class_data_dir {} --num_class_images 200 -``` - -And then we're ready to start training! - -```bash -export MODEL_NAME="CompVis/stable-diffusion-v1-4" -export OUTPUT_DIR="path-to-save-model" - -accelerate launch train_custom_diffusion.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --output_dir=$OUTPUT_DIR \ - --concepts_list=./concept_list.json \ - --with_prior_preservation --real_prior --prior_loss_weight=1.0 \ - --resolution=512 \ - --train_batch_size=2 \ - --learning_rate=1e-5 \ - --lr_warmup_steps=0 \ - --max_train_steps=500 \ - --num_class_images=200 \ - --scale_lr --hflip \ - --modifier_token "+" -``` - -Here is an example [Weights and Biases page](https://wandb.ai/sayakpaul/custom-diffusion/runs/3990tzkg) where you can check out the intermediate results along with other training details. - -### Training on human faces - -For fine-tuning on human faces we found the following configuration to work better: `learning_rate=5e-6`, `max_train_steps=1000 to 2000`, and `freeze_model=crossattn` with at least 15-20 images. - -To collect the real images use this command first before training. - -```bash -pip install clip-retrieval -python retrieve.py --class_prompt person --class_data_dir real_reg/samples_person --num_class_images 200 -``` - -Then start training! 
- -```bash -export MODEL_NAME="CompVis/stable-diffusion-v1-4" -export OUTPUT_DIR="path-to-save-model" -export INSTANCE_DIR="path-to-images" - -accelerate launch train_custom_diffusion.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --instance_data_dir=$INSTANCE_DIR \ - --output_dir=$OUTPUT_DIR \ - --class_data_dir=./real_reg/samples_person/ \ - --with_prior_preservation --real_prior --prior_loss_weight=1.0 \ - --class_prompt="person" --num_class_images=200 \ - --instance_prompt="photo of a person" \ - --resolution=512 \ - --train_batch_size=2 \ - --learning_rate=5e-6 \ - --lr_warmup_steps=0 \ - --max_train_steps=1000 \ - --scale_lr --hflip --noaug \ - --freeze_model crossattn \ - --modifier_token "" \ - --enable_xformers_memory_efficient_attention -``` - -## Inference - -Once you have trained a model using the above command, you can run inference using the below command. Make sure to include the `modifier token` (e.g. \ in above example) in your prompt. - -```python -import torch -from diffusers import DiffusionPipeline - -pipe = DiffusionPipeline.from_pretrained( - "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16 -).to("cuda") -pipe.unet.load_attn_procs( - "path-to-save-model", weight_name="pytorch_custom_diffusion_weights.bin" -) -pipe.load_textual_inversion("path-to-save-model", weight_name=".bin") - -image = pipe( - " cat sitting in a bucket", - num_inference_steps=100, - guidance_scale=6.0, - eta=1.0, -).images[0] -image.save("cat.png") -``` - -It's possible to directly load these parameters from a Hub repository: - -```python -import torch -from huggingface_hub.repocard import RepoCard -from diffusers import DiffusionPipeline - -model_id = "sayakpaul/custom-diffusion-cat" -card = RepoCard.load(model_id) -base_model_id = card.data.to_dict()["base_model"] - -pipe = DiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16).to( -"cuda") -pipe.unet.load_attn_procs(model_id, weight_name="pytorch_custom_diffusion_weights.bin") -pipe.load_textual_inversion(model_id, weight_name=".bin") - -image = pipe( - " cat sitting in a bucket", - num_inference_steps=100, - guidance_scale=6.0, - eta=1.0, -).images[0] -image.save("cat.png") -``` - -Here is an example of performing inference with multiple concepts: - -```python -import torch -from huggingface_hub.repocard import RepoCard -from diffusers import DiffusionPipeline - -model_id = "sayakpaul/custom-diffusion-cat-wooden-pot" -card = RepoCard.load(model_id) -base_model_id = card.data.to_dict()["base_model"] - -pipe = DiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16).to( -"cuda") -pipe.unet.load_attn_procs(model_id, weight_name="pytorch_custom_diffusion_weights.bin") -pipe.load_textual_inversion(model_id, weight_name=".bin") -pipe.load_textual_inversion(model_id, weight_name=".bin") - -image = pipe( - "the cat sculpture in the style of a wooden pot", - num_inference_steps=100, - guidance_scale=6.0, - eta=1.0, -).images[0] -image.save("multi-subject.png") -``` - -Here, `cat` and `wooden pot` refer to the multiple concepts. - -### Inference from a training checkpoint - -You can also perform inference from one of the complete checkpoint saved during the training process, if you used the `--checkpointing_steps` argument. - -TODO. - -## Set grads to none -To save even more memory, pass the `--set_grads_to_none` argument to the script. This will set grads to None instead of zero. However, be aware that it changes certain behaviors, so if you start experiencing any problems, remove this argument. 
- -More info: https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html - -## Experimental results -You can refer to [our webpage](https://www.cs.cmu.edu/~custom-diffusion/) that discusses our experiments in detail. We also released a more extensive dataset of 101 concepts for evaluating model customization methods. For more details please refer to our [dataset webpage](https://www.cs.cmu.edu/~custom-diffusion/dataset.html). \ No newline at end of file diff --git a/spaces/Andy1621/IAT_enhancement/model/blocks.py b/spaces/Andy1621/IAT_enhancement/model/blocks.py deleted file mode 100644 index 38d2f2160959c0441ff324f220d588fde9033a1b..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/IAT_enhancement/model/blocks.py +++ /dev/null @@ -1,281 +0,0 @@ -""" -Code copy from uniformer source code: -https://github.com/Sense-X/UniFormer -""" -import os -import torch -import torch.nn as nn -from functools import partial -import math -from timm.models.vision_transformer import VisionTransformer, _cfg -from timm.models.registry import register_model -from timm.models.layers import trunc_normal_, DropPath, to_2tuple - -# ResMLP's normalization -class Aff(nn.Module): - def __init__(self, dim): - super().__init__() - # learnable - self.alpha = nn.Parameter(torch.ones([1, 1, dim])) - self.beta = nn.Parameter(torch.zeros([1, 1, dim])) - - def forward(self, x): - x = x * self.alpha + self.beta - return x - -# Color Normalization -class Aff_channel(nn.Module): - def __init__(self, dim, channel_first = True): - super().__init__() - # learnable - self.alpha = nn.Parameter(torch.ones([1, 1, dim])) - self.beta = nn.Parameter(torch.zeros([1, 1, dim])) - self.color = nn.Parameter(torch.eye(dim)) - self.channel_first = channel_first - - def forward(self, x): - if self.channel_first: - x1 = torch.tensordot(x, self.color, dims=[[-1], [-1]]) - x2 = x1 * self.alpha + self.beta - else: - x1 = x * self.alpha + self.beta - x2 = torch.tensordot(x1, self.color, dims=[[-1], [-1]]) - return x2 - -class Mlp(nn.Module): - # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py - def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = nn.Linear(in_features, hidden_features) - self.act = act_layer() - self.fc2 = nn.Linear(hidden_features, out_features) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = self.drop(x) - return x - -class CMlp(nn.Module): - # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py - def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = nn.Conv2d(in_features, hidden_features, 1) - self.act = act_layer() - self.fc2 = nn.Conv2d(hidden_features, out_features, 1) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = self.drop(x) - return x - -class CBlock_ln(nn.Module): - def __init__(self, dim, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., - drop_path=0., act_layer=nn.GELU, norm_layer=Aff_channel, init_values=1e-4): - super().__init__() - 
self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim) - #self.norm1 = Aff_channel(dim) - self.norm1 = norm_layer(dim) - self.conv1 = nn.Conv2d(dim, dim, 1) - self.conv2 = nn.Conv2d(dim, dim, 1) - self.attn = nn.Conv2d(dim, dim, 5, padding=2, groups=dim) - # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here - self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() - #self.norm2 = Aff_channel(dim) - self.norm2 = norm_layer(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.gamma_1 = nn.Parameter(init_values * torch.ones((1, dim, 1, 1)), requires_grad=True) - self.gamma_2 = nn.Parameter(init_values * torch.ones((1, dim, 1, 1)), requires_grad=True) - self.mlp = CMlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) - - def forward(self, x): - x = x + self.pos_embed(x) - B, C, H, W = x.shape - #print(x.shape) - norm_x = x.flatten(2).transpose(1, 2) - #print(norm_x.shape) - norm_x = self.norm1(norm_x) - norm_x = norm_x.view(B, H, W, C).permute(0, 3, 1, 2) - - - x = x + self.drop_path(self.gamma_1*self.conv2(self.attn(self.conv1(norm_x)))) - norm_x = x.flatten(2).transpose(1, 2) - norm_x = self.norm2(norm_x) - norm_x = norm_x.view(B, H, W, C).permute(0, 3, 1, 2) - x = x + self.drop_path(self.gamma_2*self.mlp(norm_x)) - return x - - -def window_partition(x, window_size): - """ - Args: - x: (B, H, W, C) - window_size (int): window size - Returns: - windows: (num_windows*B, window_size, window_size, C) - """ - B, H, W, C = x.shape - #print(x.shape) - x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) - windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) - return windows - - -def window_reverse(windows, window_size, H, W): - """ - Args: - windows: (num_windows*B, window_size, window_size, C) - window_size (int): Window size - H (int): Height of image - W (int): Width of image - Returns: - x: (B, H, W, C) - """ - B = int(windows.shape[0] / (H * W / window_size / window_size)) - x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) - x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) - return x - - -class WindowAttention(nn.Module): - r""" Window based multi-head self attention (W-MSA) module with relative position bias. - It supports both of shifted and non-shifted window. - Args: - dim (int): Number of input channels. - window_size (tuple[int]): The height and width of the window. - num_heads (int): Number of attention heads. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set - attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 - proj_drop (float, optional): Dropout ratio of output. 
Default: 0.0 - """ - - def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.): - super().__init__() - self.dim = dim - self.window_size = window_size # Wh, Ww - self.num_heads = num_heads - head_dim = dim // num_heads - self.scale = qk_scale or head_dim ** -0.5 - - self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) - self.attn_drop = nn.Dropout(attn_drop) - self.proj = nn.Linear(dim, dim) - self.proj_drop = nn.Dropout(proj_drop) - - self.softmax = nn.Softmax(dim=-1) - - def forward(self, x): - B_, N, C = x.shape - qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) - q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) - - q = q * self.scale - attn = (q @ k.transpose(-2, -1)) - - attn = self.softmax(attn) - - attn = self.attn_drop(attn) - - x = (attn @ v).transpose(1, 2).reshape(B_, N, C) - x = self.proj(x) - x = self.proj_drop(x) - return x - -## Layer_norm, Aff_norm, Aff_channel_norm -class SwinTransformerBlock(nn.Module): - r""" Swin Transformer Block. - Args: - dim (int): Number of input channels. - input_resolution (tuple[int]): Input resulotion. - num_heads (int): Number of attention heads. - window_size (int): Window size. - shift_size (int): Shift size for SW-MSA. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. - drop (float, optional): Dropout rate. Default: 0.0 - attn_drop (float, optional): Attention dropout rate. Default: 0.0 - drop_path (float, optional): Stochastic depth rate. Default: 0.0 - act_layer (nn.Module, optional): Activation layer. Default: nn.GELU - norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm - """ - - def __init__(self, dim, num_heads=2, window_size=8, shift_size=0, - mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0., - act_layer=nn.GELU, norm_layer=Aff_channel): - super().__init__() - self.dim = dim - self.num_heads = num_heads - self.window_size = window_size - self.shift_size = shift_size - self.mlp_ratio = mlp_ratio - - self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim) - #self.norm1 = norm_layer(dim) - self.norm1 = norm_layer(dim) - self.attn = WindowAttention( - dim, window_size=to_2tuple(self.window_size), num_heads=num_heads, - qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) - - self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() - #self.norm2 = norm_layer(dim) - self.norm2 = norm_layer(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) - - def forward(self, x): - x = x + self.pos_embed(x) - B, C, H, W = x.shape - x = x.flatten(2).transpose(1, 2) - - shortcut = x - x = self.norm1(x) - x = x.view(B, H, W, C) - - # cyclic shift - if self.shift_size > 0: - shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) - else: - shifted_x = x - - # partition windows - x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C - x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C - - # W-MSA/SW-MSA - attn_windows = self.attn(x_windows) # nW*B, window_size*window_size, C - - # merge windows - attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) - shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C - - x = shifted_x - x = x.view(B, H * W, C) - - # FFN - x = shortcut + self.drop_path(x) - x = x + self.drop_path(self.mlp(self.norm2(x))) - x = x.transpose(1, 2).reshape(B, C, H, W) - - return x - - -if __name__ == "__main__": - os.environ['CUDA_VISIBLE_DEVICES']='1' - cb_blovk = CBlock_ln(dim = 16) - x = torch.Tensor(1, 16, 400, 600) - swin = SwinTransformerBlock(dim=16, num_heads=4) - x = cb_blovk(x) - print(x.shape) diff --git a/spaces/Andy1621/uniformer_video_demo/kinetics_class_index.py b/spaces/Andy1621/uniformer_video_demo/kinetics_class_index.py deleted file mode 100644 index 597e23e72c690f2dce0525b24bdcc2a992c4d594..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_video_demo/kinetics_class_index.py +++ /dev/null @@ -1,402 +0,0 @@ -kinetics_classnames = { - "0": "riding a bike", - "1": "marching", - "2": "dodgeball", - "3": "playing cymbals", - "4": "checking tires", - "5": "roller skating", - "6": "tasting beer", - "7": "clapping", - "8": "drawing", - "9": "juggling fire", - "10": "bobsledding", - "11": "petting animal (not cat)", - "12": "spray painting", - "13": "training dog", - "14": "eating watermelon", - "15": "building cabinet", - "16": "applauding", - "17": "playing harp", - "18": "balloon blowing", - "19": "sled dog racing", - "20": "wrestling", - "21": "pole vault", - "22": "hurling (sport)", - "23": "riding scooter", - "24": "shearing sheep", - "25": "sweeping floor", - "26": "eating carrots", - "27": "skateboarding", - "28": "dunking basketball", - "29": "disc golfing", - "30": "eating spaghetti", - "31": "playing flute", - "32": "riding mechanical bull", - "33": "making sushi", - "34": "trapezing", - "35": "picking fruit", - "36": "stretching leg", - "37": "playing ukulele", - "38": "tying tie", - "39": "skydiving", - "40": "playing cello", - "41": "jumping into pool", - "42": "shooting goal (soccer)", - "43": "trimming trees", - "44": "bookbinding", - "45": "ski jumping", - "46": "walking the dog", - "47": "riding unicycle", - "48": "shaving head", - "49": "hopscotch", - "50": "playing piano", - "51": "parasailing", - "52": "bartending", - "53": "kicking field goal", - "54": "finger snapping", - "55": "dining", - "56": "yawning", - "57": "peeling potatoes", - "58": "canoeing or kayaking", - "59": "front raises", - "60": "laughing", - "61": "dancing macarena", - "62": "digging", - "63": "reading newspaper", - "64": "hitting baseball", - "65": "clay pottery making", - "66": "exercising with an exercise ball", - "67": 
"playing saxophone", - "68": "shooting basketball", - "69": "washing hair", - "70": "lunge", - "71": "brushing hair", - "72": "curling hair", - "73": "kitesurfing", - "74": "tapping guitar", - "75": "bending back", - "76": "skipping rope", - "77": "situp", - "78": "folding paper", - "79": "cracking neck", - "80": "assembling computer", - "81": "cleaning gutters", - "82": "blowing out candles", - "83": "shaking hands", - "84": "dancing gangnam style", - "85": "windsurfing", - "86": "tap dancing", - "87": "skiing (not slalom or crosscountry)", - "88": "bandaging", - "89": "push up", - "90": "doing nails", - "91": "punching person (boxing)", - "92": "bouncing on trampoline", - "93": "scrambling eggs", - "94": "singing", - "95": "cleaning floor", - "96": "krumping", - "97": "drumming fingers", - "98": "snowmobiling", - "99": "gymnastics tumbling", - "100": "headbanging", - "101": "catching or throwing frisbee", - "102": "riding elephant", - "103": "bee keeping", - "104": "feeding birds", - "105": "snatch weight lifting", - "106": "mowing lawn", - "107": "fixing hair", - "108": "playing trumpet", - "109": "flying kite", - "110": "crossing river", - "111": "swinging legs", - "112": "sanding floor", - "113": "belly dancing", - "114": "sneezing", - "115": "clean and jerk", - "116": "side kick", - "117": "filling eyebrows", - "118": "shuffling cards", - "119": "recording music", - "120": "cartwheeling", - "121": "feeding fish", - "122": "folding clothes", - "123": "water skiing", - "124": "tobogganing", - "125": "blowing leaves", - "126": "smoking", - "127": "unboxing", - "128": "tai chi", - "129": "waxing legs", - "130": "riding camel", - "131": "slapping", - "132": "tossing salad", - "133": "capoeira", - "134": "playing cards", - "135": "playing organ", - "136": "playing violin", - "137": "playing drums", - "138": "tapping pen", - "139": "vault", - "140": "shoveling snow", - "141": "playing tennis", - "142": "getting a tattoo", - "143": "making a sandwich", - "144": "making tea", - "145": "grinding meat", - "146": "squat", - "147": "eating doughnuts", - "148": "ice fishing", - "149": "snowkiting", - "150": "kicking soccer ball", - "151": "playing controller", - "152": "giving or receiving award", - "153": "welding", - "154": "throwing discus", - "155": "throwing axe", - "156": "ripping paper", - "157": "swimming butterfly stroke", - "158": "air drumming", - "159": "blowing nose", - "160": "hockey stop", - "161": "taking a shower", - "162": "bench pressing", - "163": "planting trees", - "164": "pumping fist", - "165": "climbing tree", - "166": "tickling", - "167": "high kick", - "168": "waiting in line", - "169": "slacklining", - "170": "tango dancing", - "171": "hurdling", - "172": "carrying baby", - "173": "celebrating", - "174": "sharpening knives", - "175": "passing American football (in game)", - "176": "headbutting", - "177": "playing recorder", - "178": "brush painting", - "179": "garbage collecting", - "180": "robot dancing", - "181": "shredding paper", - "182": "pumping gas", - "183": "rock climbing", - "184": "hula hooping", - "185": "braiding hair", - "186": "opening present", - "187": "texting", - "188": "decorating the christmas tree", - "189": "answering questions", - "190": "playing keyboard", - "191": "writing", - "192": "bungee jumping", - "193": "sniffing", - "194": "eating burger", - "195": "playing accordion", - "196": "making pizza", - "197": "playing volleyball", - "198": "tasting food", - "199": "pushing cart", - "200": "spinning poi", - "201": "cleaning windows", - "202": 
"arm wrestling", - "203": "changing oil", - "204": "swimming breast stroke", - "205": "tossing coin", - "206": "deadlifting", - "207": "hoverboarding", - "208": "cutting watermelon", - "209": "cheerleading", - "210": "snorkeling", - "211": "washing hands", - "212": "eating cake", - "213": "pull ups", - "214": "surfing water", - "215": "eating hotdog", - "216": "holding snake", - "217": "playing harmonica", - "218": "ironing", - "219": "cutting nails", - "220": "golf chipping", - "221": "shot put", - "222": "hugging", - "223": "playing clarinet", - "224": "faceplanting", - "225": "trimming or shaving beard", - "226": "drinking shots", - "227": "riding mountain bike", - "228": "tying bow tie", - "229": "swinging on something", - "230": "skiing crosscountry", - "231": "unloading truck", - "232": "cleaning pool", - "233": "jogging", - "234": "ice climbing", - "235": "mopping floor", - "236": "making bed", - "237": "diving cliff", - "238": "washing dishes", - "239": "grooming dog", - "240": "weaving basket", - "241": "frying vegetables", - "242": "stomping grapes", - "243": "moving furniture", - "244": "cooking sausages", - "245": "doing laundry", - "246": "dying hair", - "247": "knitting", - "248": "reading book", - "249": "baby waking up", - "250": "punching bag", - "251": "surfing crowd", - "252": "cooking chicken", - "253": "pushing car", - "254": "springboard diving", - "255": "swing dancing", - "256": "massaging legs", - "257": "beatboxing", - "258": "breading or breadcrumbing", - "259": "somersaulting", - "260": "brushing teeth", - "261": "stretching arm", - "262": "juggling balls", - "263": "massaging person's head", - "264": "eating ice cream", - "265": "extinguishing fire", - "266": "hammer throw", - "267": "whistling", - "268": "crawling baby", - "269": "using remote controller (not gaming)", - "270": "playing cricket", - "271": "opening bottle", - "272": "playing xylophone", - "273": "motorcycling", - "274": "driving car", - "275": "exercising arm", - "276": "passing American football (not in game)", - "277": "playing kickball", - "278": "sticking tongue out", - "279": "flipping pancake", - "280": "catching fish", - "281": "eating chips", - "282": "shaking head", - "283": "sword fighting", - "284": "playing poker", - "285": "cooking on campfire", - "286": "doing aerobics", - "287": "paragliding", - "288": "using segway", - "289": "folding napkins", - "290": "playing bagpipes", - "291": "gargling", - "292": "skiing slalom", - "293": "strumming guitar", - "294": "javelin throw", - "295": "waxing back", - "296": "riding or walking with horse", - "297": "plastering", - "298": "long jump", - "299": "parkour", - "300": "wrapping present", - "301": "egg hunting", - "302": "archery", - "303": "cleaning toilet", - "304": "swimming backstroke", - "305": "snowboarding", - "306": "catching or throwing baseball", - "307": "massaging back", - "308": "blowing glass", - "309": "playing guitar", - "310": "playing chess", - "311": "golf driving", - "312": "presenting weather forecast", - "313": "rock scissors paper", - "314": "high jump", - "315": "baking cookies", - "316": "using computer", - "317": "washing feet", - "318": "arranging flowers", - "319": "playing bass guitar", - "320": "spraying", - "321": "cutting pineapple", - "322": "waxing chest", - "323": "auctioning", - "324": "jetskiing", - "325": "drinking", - "326": "busking", - "327": "playing monopoly", - "328": "salsa dancing", - "329": "waxing eyebrows", - "330": "watering plants", - "331": "zumba", - "332": "chopping wood", - "333": 
"pushing wheelchair", - "334": "carving pumpkin", - "335": "building shed", - "336": "making jewelry", - "337": "catching or throwing softball", - "338": "bending metal", - "339": "ice skating", - "340": "dancing charleston", - "341": "abseiling", - "342": "climbing a rope", - "343": "crying", - "344": "cleaning shoes", - "345": "dancing ballet", - "346": "driving tractor", - "347": "triple jump", - "348": "throwing ball", - "349": "getting a haircut", - "350": "running on treadmill", - "351": "climbing ladder", - "352": "blasting sand", - "353": "playing trombone", - "354": "drop kicking", - "355": "country line dancing", - "356": "changing wheel", - "357": "feeding goats", - "358": "tying knot (not on a tie)", - "359": "setting table", - "360": "shaving legs", - "361": "kissing", - "362": "riding mule", - "363": "counting money", - "364": "laying bricks", - "365": "barbequing", - "366": "news anchoring", - "367": "smoking hookah", - "368": "cooking egg", - "369": "peeling apples", - "370": "yoga", - "371": "sharpening pencil", - "372": "dribbling basketball", - "373": "petting cat", - "374": "playing ice hockey", - "375": "milking cow", - "376": "shining shoes", - "377": "juggling soccer ball", - "378": "scuba diving", - "379": "playing squash or racquetball", - "380": "drinking beer", - "381": "sign language interpreting", - "382": "playing basketball", - "383": "breakdancing", - "384": "testifying", - "385": "making snowman", - "386": "golf putting", - "387": "playing didgeridoo", - "388": "biking through snow", - "389": "sailing", - "390": "jumpstyle dancing", - "391": "water sliding", - "392": "grooming horse", - "393": "massaging feet", - "394": "playing paintball", - "395": "making a cake", - "396": "bowling", - "397": "contact juggling", - "398": "applying cream", - "399": "playing badminton" -} \ No newline at end of file diff --git a/spaces/AquaSuisei/ChatGPTXE/modules/overwrites.py b/spaces/AquaSuisei/ChatGPTXE/modules/overwrites.py deleted file mode 100644 index bfcd4d01b7d7bec1184a8d09113933bca860530b..0000000000000000000000000000000000000000 --- a/spaces/AquaSuisei/ChatGPTXE/modules/overwrites.py +++ /dev/null @@ -1,56 +0,0 @@ -from __future__ import annotations -import logging - -from llama_index import Prompt -from typing import List, Tuple -import mdtex2html - -from modules.presets import * -from modules.llama_func import * - - -def compact_text_chunks(self, prompt: Prompt, text_chunks: List[str]) -> List[str]: - logging.debug("Compacting text chunks...🚀🚀🚀") - combined_str = [c.strip() for c in text_chunks if c.strip()] - combined_str = [f"[{index+1}] {c}" for index, c in enumerate(combined_str)] - combined_str = "\n\n".join(combined_str) - # resplit based on self.max_chunk_overlap - text_splitter = self.get_text_splitter_given_prompt(prompt, 1, padding=1) - return text_splitter.split_text(combined_str) - - -def postprocess( - self, y: List[Tuple[str | None, str | None]] -) -> List[Tuple[str | None, str | None]]: - """ - Parameters: - y: List of tuples representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. - Returns: - List of tuples representing the message and response. Each message and response will be a string of HTML. 
- """ - if y is None or y == []: - return [] - user, bot = y[-1] - if not detect_converted_mark(user): - user = convert_asis(user) - if not detect_converted_mark(bot): - bot = convert_mdtext(bot) - y[-1] = (user, bot) - return y - -with open("./assets/custom.js", "r", encoding="utf-8") as f, open("./assets/Kelpy-Codos.js", "r", encoding="utf-8") as f2: - customJS = f.read() - kelpyCodos = f2.read() - -def reload_javascript(): - print("Reloading javascript...") - js = f'' - def template_response(*args, **kwargs): - res = GradioTemplateResponseOriginal(*args, **kwargs) - res.body = res.body.replace(b'', f'{js}'.encode("utf8")) - res.init_headers() - return res - - gr.routes.templates.TemplateResponse = template_response - -GradioTemplateResponseOriginal = gr.routes.templates.TemplateResponse \ No newline at end of file diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/euctwprober.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/euctwprober.py deleted file mode 100644 index a37ab18995822ad6b3372d56366becdccf9a4c26..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/euctwprober.py +++ /dev/null @@ -1,47 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is mozilla.org code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from .chardistribution import EUCTWDistributionAnalysis -from .codingstatemachine import CodingStateMachine -from .mbcharsetprober import MultiByteCharSetProber -from .mbcssm import EUCTW_SM_MODEL - - -class EUCTWProber(MultiByteCharSetProber): - def __init__(self) -> None: - super().__init__() - self.coding_sm = CodingStateMachine(EUCTW_SM_MODEL) - self.distribution_analyzer = EUCTWDistributionAnalysis() - self.reset() - - @property - def charset_name(self) -> str: - return "EUC-TW" - - @property - def language(self) -> str: - return "Taiwan" diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/util/__init__.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/util/__init__.py deleted file mode 100644 index 4547fc522b690ba2697843edd044f2039a4123a9..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/util/__init__.py +++ /dev/null @@ -1,49 +0,0 @@ -from __future__ import absolute_import - -# For backwards compatibility, provide imports that used to be here. -from .connection import is_connection_dropped -from .request import SKIP_HEADER, SKIPPABLE_HEADERS, make_headers -from .response import is_fp_closed -from .retry import Retry -from .ssl_ import ( - ALPN_PROTOCOLS, - HAS_SNI, - IS_PYOPENSSL, - IS_SECURETRANSPORT, - PROTOCOL_TLS, - SSLContext, - assert_fingerprint, - resolve_cert_reqs, - resolve_ssl_version, - ssl_wrap_socket, -) -from .timeout import Timeout, current_time -from .url import Url, get_host, parse_url, split_first -from .wait import wait_for_read, wait_for_write - -__all__ = ( - "HAS_SNI", - "IS_PYOPENSSL", - "IS_SECURETRANSPORT", - "SSLContext", - "PROTOCOL_TLS", - "ALPN_PROTOCOLS", - "Retry", - "Timeout", - "Url", - "assert_fingerprint", - "current_time", - "is_connection_dropped", - "is_fp_closed", - "get_host", - "parse_url", - "make_headers", - "resolve_cert_reqs", - "resolve_ssl_version", - "split_first", - "ssl_wrap_socket", - "wait_for_read", - "wait_for_write", - "SKIP_HEADER", - "SKIPPABLE_HEADERS", -) diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/more_itertools/recipes.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/more_itertools/recipes.py deleted file mode 100644 index 521abd7c2ca633f90a5ba13a8060c5c3d0c32205..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/more_itertools/recipes.py +++ /dev/null @@ -1,620 +0,0 @@ -"""Imported from the recipes section of the itertools documentation. - -All functions taken from the recipes section of the itertools library docs -[1]_. -Some backward-compatible usability improvements have been made. - -.. 
[1] http://docs.python.org/library/itertools.html#recipes - -""" -import warnings -from collections import deque -from itertools import ( - chain, - combinations, - count, - cycle, - groupby, - islice, - repeat, - starmap, - tee, - zip_longest, -) -import operator -from random import randrange, sample, choice - -__all__ = [ - 'all_equal', - 'consume', - 'convolve', - 'dotproduct', - 'first_true', - 'flatten', - 'grouper', - 'iter_except', - 'ncycles', - 'nth', - 'nth_combination', - 'padnone', - 'pad_none', - 'pairwise', - 'partition', - 'powerset', - 'prepend', - 'quantify', - 'random_combination_with_replacement', - 'random_combination', - 'random_permutation', - 'random_product', - 'repeatfunc', - 'roundrobin', - 'tabulate', - 'tail', - 'take', - 'unique_everseen', - 'unique_justseen', -] - - -def take(n, iterable): - """Return first *n* items of the iterable as a list. - - >>> take(3, range(10)) - [0, 1, 2] - - If there are fewer than *n* items in the iterable, all of them are - returned. - - >>> take(10, range(3)) - [0, 1, 2] - - """ - return list(islice(iterable, n)) - - -def tabulate(function, start=0): - """Return an iterator over the results of ``func(start)``, - ``func(start + 1)``, ``func(start + 2)``... - - *func* should be a function that accepts one integer argument. - - If *start* is not specified it defaults to 0. It will be incremented each - time the iterator is advanced. - - >>> square = lambda x: x ** 2 - >>> iterator = tabulate(square, -3) - >>> take(4, iterator) - [9, 4, 1, 0] - - """ - return map(function, count(start)) - - -def tail(n, iterable): - """Return an iterator over the last *n* items of *iterable*. - - >>> t = tail(3, 'ABCDEFG') - >>> list(t) - ['E', 'F', 'G'] - - """ - return iter(deque(iterable, maxlen=n)) - - -def consume(iterator, n=None): - """Advance *iterable* by *n* steps. If *n* is ``None``, consume it - entirely. - - Efficiently exhausts an iterator without returning values. Defaults to - consuming the whole iterator, but an optional second argument may be - provided to limit consumption. - - >>> i = (x for x in range(10)) - >>> next(i) - 0 - >>> consume(i, 3) - >>> next(i) - 4 - >>> consume(i) - >>> next(i) - Traceback (most recent call last): - File "", line 1, in - StopIteration - - If the iterator has fewer items remaining than the provided limit, the - whole iterator will be consumed. - - >>> i = (x for x in range(3)) - >>> consume(i, 5) - >>> next(i) - Traceback (most recent call last): - File "", line 1, in - StopIteration - - """ - # Use functions that consume iterators at C speed. - if n is None: - # feed the entire iterator into a zero-length deque - deque(iterator, maxlen=0) - else: - # advance to the empty slice starting at position n - next(islice(iterator, n, n), None) - - -def nth(iterable, n, default=None): - """Returns the nth item or a default value. - - >>> l = range(10) - >>> nth(l, 3) - 3 - >>> nth(l, 20, "zebra") - 'zebra' - - """ - return next(islice(iterable, n, None), default) - - -def all_equal(iterable): - """ - Returns ``True`` if all the elements are equal to each other. - - >>> all_equal('aaaa') - True - >>> all_equal('aaab') - False - - """ - g = groupby(iterable) - return next(g, True) and not next(g, False) - - -def quantify(iterable, pred=bool): - """Return the how many times the predicate is true. - - >>> quantify([True, False, True]) - 2 - - """ - return sum(map(pred, iterable)) - - -def pad_none(iterable): - """Returns the sequence of elements and then returns ``None`` indefinitely. 
- - >>> take(5, pad_none(range(3))) - [0, 1, 2, None, None] - - Useful for emulating the behavior of the built-in :func:`map` function. - - See also :func:`padded`. - - """ - return chain(iterable, repeat(None)) - - -padnone = pad_none - - -def ncycles(iterable, n): - """Returns the sequence elements *n* times - - >>> list(ncycles(["a", "b"], 3)) - ['a', 'b', 'a', 'b', 'a', 'b'] - - """ - return chain.from_iterable(repeat(tuple(iterable), n)) - - -def dotproduct(vec1, vec2): - """Returns the dot product of the two iterables. - - >>> dotproduct([10, 10], [20, 20]) - 400 - - """ - return sum(map(operator.mul, vec1, vec2)) - - -def flatten(listOfLists): - """Return an iterator flattening one level of nesting in a list of lists. - - >>> list(flatten([[0, 1], [2, 3]])) - [0, 1, 2, 3] - - See also :func:`collapse`, which can flatten multiple levels of nesting. - - """ - return chain.from_iterable(listOfLists) - - -def repeatfunc(func, times=None, *args): - """Call *func* with *args* repeatedly, returning an iterable over the - results. - - If *times* is specified, the iterable will terminate after that many - repetitions: - - >>> from operator import add - >>> times = 4 - >>> args = 3, 5 - >>> list(repeatfunc(add, times, *args)) - [8, 8, 8, 8] - - If *times* is ``None`` the iterable will not terminate: - - >>> from random import randrange - >>> times = None - >>> args = 1, 11 - >>> take(6, repeatfunc(randrange, times, *args)) # doctest:+SKIP - [2, 4, 8, 1, 8, 4] - - """ - if times is None: - return starmap(func, repeat(args)) - return starmap(func, repeat(args, times)) - - -def _pairwise(iterable): - """Returns an iterator of paired items, overlapping, from the original - - >>> take(4, pairwise(count())) - [(0, 1), (1, 2), (2, 3), (3, 4)] - - On Python 3.10 and above, this is an alias for :func:`itertools.pairwise`. - - """ - a, b = tee(iterable) - next(b, None) - yield from zip(a, b) - - -try: - from itertools import pairwise as itertools_pairwise -except ImportError: - pairwise = _pairwise -else: - - def pairwise(iterable): - yield from itertools_pairwise(iterable) - - pairwise.__doc__ = _pairwise.__doc__ - - -def grouper(iterable, n, fillvalue=None): - """Collect data into fixed-length chunks or blocks. - - >>> list(grouper('ABCDEFG', 3, 'x')) - [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')] - - """ - if isinstance(iterable, int): - warnings.warn( - "grouper expects iterable as first parameter", DeprecationWarning - ) - n, iterable = iterable, n - args = [iter(iterable)] * n - return zip_longest(fillvalue=fillvalue, *args) - - -def roundrobin(*iterables): - """Yields an item from each iterable, alternating between them. - - >>> list(roundrobin('ABC', 'D', 'EF')) - ['A', 'D', 'E', 'B', 'F', 'C'] - - This function produces the same output as :func:`interleave_longest`, but - may perform better for some inputs (in particular when the number of - iterables is small). - - """ - # Recipe credited to George Sakkis - pending = len(iterables) - nexts = cycle(iter(it).__next__ for it in iterables) - while pending: - try: - for next in nexts: - yield next() - except StopIteration: - pending -= 1 - nexts = cycle(islice(nexts, pending)) - - -def partition(pred, iterable): - """ - Returns a 2-tuple of iterables derived from the input iterable. - The first yields the items that have ``pred(item) == False``. - The second yields the items that have ``pred(item) == True``. 
- - >>> is_odd = lambda x: x % 2 != 0 - >>> iterable = range(10) - >>> even_items, odd_items = partition(is_odd, iterable) - >>> list(even_items), list(odd_items) - ([0, 2, 4, 6, 8], [1, 3, 5, 7, 9]) - - If *pred* is None, :func:`bool` is used. - - >>> iterable = [0, 1, False, True, '', ' '] - >>> false_items, true_items = partition(None, iterable) - >>> list(false_items), list(true_items) - ([0, False, ''], [1, True, ' ']) - - """ - if pred is None: - pred = bool - - evaluations = ((pred(x), x) for x in iterable) - t1, t2 = tee(evaluations) - return ( - (x for (cond, x) in t1 if not cond), - (x for (cond, x) in t2 if cond), - ) - - -def powerset(iterable): - """Yields all possible subsets of the iterable. - - >>> list(powerset([1, 2, 3])) - [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)] - - :func:`powerset` will operate on iterables that aren't :class:`set` - instances, so repeated elements in the input will produce repeated elements - in the output. Use :func:`unique_everseen` on the input to avoid generating - duplicates: - - >>> seq = [1, 1, 0] - >>> list(powerset(seq)) - [(), (1,), (1,), (0,), (1, 1), (1, 0), (1, 0), (1, 1, 0)] - >>> from more_itertools import unique_everseen - >>> list(powerset(unique_everseen(seq))) - [(), (1,), (0,), (1, 0)] - - """ - s = list(iterable) - return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1)) - - -def unique_everseen(iterable, key=None): - """ - Yield unique elements, preserving order. - - >>> list(unique_everseen('AAAABBBCCDAABBB')) - ['A', 'B', 'C', 'D'] - >>> list(unique_everseen('ABBCcAD', str.lower)) - ['A', 'B', 'C', 'D'] - - Sequences with a mix of hashable and unhashable items can be used. - The function will be slower (i.e., `O(n^2)`) for unhashable items. - - Remember that ``list`` objects are unhashable - you can use the *key* - parameter to transform the list to a tuple (which is hashable) to - avoid a slowdown. - - >>> iterable = ([1, 2], [2, 3], [1, 2]) - >>> list(unique_everseen(iterable)) # Slow - [[1, 2], [2, 3]] - >>> list(unique_everseen(iterable, key=tuple)) # Faster - [[1, 2], [2, 3]] - - Similary, you may want to convert unhashable ``set`` objects with - ``key=frozenset``. For ``dict`` objects, - ``key=lambda x: frozenset(x.items())`` can be used. - - """ - seenset = set() - seenset_add = seenset.add - seenlist = [] - seenlist_add = seenlist.append - use_key = key is not None - - for element in iterable: - k = key(element) if use_key else element - try: - if k not in seenset: - seenset_add(k) - yield element - except TypeError: - if k not in seenlist: - seenlist_add(k) - yield element - - -def unique_justseen(iterable, key=None): - """Yields elements in order, ignoring serial duplicates - - >>> list(unique_justseen('AAAABBBCCDAABBB')) - ['A', 'B', 'C', 'D', 'A', 'B'] - >>> list(unique_justseen('ABBCcAD', str.lower)) - ['A', 'B', 'C', 'A', 'D'] - - """ - return map(next, map(operator.itemgetter(1), groupby(iterable, key))) - - -def iter_except(func, exception, first=None): - """Yields results from a function repeatedly until an exception is raised. - - Converts a call-until-exception interface to an iterator interface. - Like ``iter(func, sentinel)``, but uses an exception instead of a sentinel - to end the loop. 
- - >>> l = [0, 1, 2] - >>> list(iter_except(l.pop, IndexError)) - [2, 1, 0] - - """ - try: - if first is not None: - yield first() - while 1: - yield func() - except exception: - pass - - -def first_true(iterable, default=None, pred=None): - """ - Returns the first true value in the iterable. - - If no true value is found, returns *default* - - If *pred* is not None, returns the first item for which - ``pred(item) == True`` . - - >>> first_true(range(10)) - 1 - >>> first_true(range(10), pred=lambda x: x > 5) - 6 - >>> first_true(range(10), default='missing', pred=lambda x: x > 9) - 'missing' - - """ - return next(filter(pred, iterable), default) - - -def random_product(*args, repeat=1): - """Draw an item at random from each of the input iterables. - - >>> random_product('abc', range(4), 'XYZ') # doctest:+SKIP - ('c', 3, 'Z') - - If *repeat* is provided as a keyword argument, that many items will be - drawn from each iterable. - - >>> random_product('abcd', range(4), repeat=2) # doctest:+SKIP - ('a', 2, 'd', 3) - - This equivalent to taking a random selection from - ``itertools.product(*args, **kwarg)``. - - """ - pools = [tuple(pool) for pool in args] * repeat - return tuple(choice(pool) for pool in pools) - - -def random_permutation(iterable, r=None): - """Return a random *r* length permutation of the elements in *iterable*. - - If *r* is not specified or is ``None``, then *r* defaults to the length of - *iterable*. - - >>> random_permutation(range(5)) # doctest:+SKIP - (3, 4, 0, 1, 2) - - This equivalent to taking a random selection from - ``itertools.permutations(iterable, r)``. - - """ - pool = tuple(iterable) - r = len(pool) if r is None else r - return tuple(sample(pool, r)) - - -def random_combination(iterable, r): - """Return a random *r* length subsequence of the elements in *iterable*. - - >>> random_combination(range(5), 3) # doctest:+SKIP - (2, 3, 4) - - This equivalent to taking a random selection from - ``itertools.combinations(iterable, r)``. - - """ - pool = tuple(iterable) - n = len(pool) - indices = sorted(sample(range(n), r)) - return tuple(pool[i] for i in indices) - - -def random_combination_with_replacement(iterable, r): - """Return a random *r* length subsequence of elements in *iterable*, - allowing individual elements to be repeated. - - >>> random_combination_with_replacement(range(3), 5) # doctest:+SKIP - (0, 0, 1, 2, 2) - - This equivalent to taking a random selection from - ``itertools.combinations_with_replacement(iterable, r)``. - - """ - pool = tuple(iterable) - n = len(pool) - indices = sorted(randrange(n) for i in range(r)) - return tuple(pool[i] for i in indices) - - -def nth_combination(iterable, r, index): - """Equivalent to ``list(combinations(iterable, r))[index]``. - - The subsequences of *iterable* that are of length *r* can be ordered - lexicographically. :func:`nth_combination` computes the subsequence at - sort position *index* directly, without computing the previous - subsequences. - - >>> nth_combination(range(5), 3, 5) - (0, 3, 4) - - ``ValueError`` will be raised If *r* is negative or greater than the length - of *iterable*. - ``IndexError`` will be raised if the given *index* is invalid. 
- """ - pool = tuple(iterable) - n = len(pool) - if (r < 0) or (r > n): - raise ValueError - - c = 1 - k = min(r, n - r) - for i in range(1, k + 1): - c = c * (n - k + i) // i - - if index < 0: - index += c - - if (index < 0) or (index >= c): - raise IndexError - - result = [] - while r: - c, n, r = c * r // n, n - 1, r - 1 - while index >= c: - index -= c - c, n = c * (n - r) // n, n - 1 - result.append(pool[-1 - n]) - - return tuple(result) - - -def prepend(value, iterator): - """Yield *value*, followed by the elements in *iterator*. - - >>> value = '0' - >>> iterator = ['1', '2', '3'] - >>> list(prepend(value, iterator)) - ['0', '1', '2', '3'] - - To prepend multiple values, see :func:`itertools.chain` - or :func:`value_chain`. - - """ - return chain([value], iterator) - - -def convolve(signal, kernel): - """Convolve the iterable *signal* with the iterable *kernel*. - - >>> signal = (1, 2, 3, 4, 5) - >>> kernel = [3, 2, 1] - >>> list(convolve(signal, kernel)) - [3, 8, 14, 20, 26, 14, 5] - - Note: the input arguments are not interchangeable, as the *kernel* - is immediately consumed and stored. - - """ - kernel = tuple(kernel)[::-1] - n = len(kernel) - window = deque([0], maxlen=n) * n - for x in chain(signal, repeat(0, n - 1)): - window.append(x) - yield sum(map(operator.mul, kernel, window)) diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/testing.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/testing.py deleted file mode 100644 index 84a0ef17078c99e5917db41e3dbaf035fe206d7c..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/testing.py +++ /dev/null @@ -1,331 +0,0 @@ -# testing.py - -from contextlib import contextmanager -import typing - -from .core import ( - ParserElement, - ParseException, - Keyword, - __diag__, - __compat__, -) - - -class pyparsing_test: - """ - namespace class for classes useful in writing unit tests - """ - - class reset_pyparsing_context: - """ - Context manager to be used when writing unit tests that modify pyparsing config values: - - packrat parsing - - bounded recursion parsing - - default whitespace characters. - - default keyword characters - - literal string auto-conversion class - - __diag__ settings - - Example:: - - with reset_pyparsing_context(): - # test that literals used to construct a grammar are automatically suppressed - ParserElement.inlineLiteralsUsing(Suppress) - - term = Word(alphas) | Word(nums) - group = Group('(' + term[...] 
+ ')') - - # assert that the '()' characters are not included in the parsed tokens - self.assertParseAndCheckList(group, "(abc 123 def)", ['abc', '123', 'def']) - - # after exiting context manager, literals are converted to Literal expressions again - """ - - def __init__(self): - self._save_context = {} - - def save(self): - self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS - self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS - - self._save_context[ - "literal_string_class" - ] = ParserElement._literalStringClass - - self._save_context["verbose_stacktrace"] = ParserElement.verbose_stacktrace - - self._save_context["packrat_enabled"] = ParserElement._packratEnabled - if ParserElement._packratEnabled: - self._save_context[ - "packrat_cache_size" - ] = ParserElement.packrat_cache.size - else: - self._save_context["packrat_cache_size"] = None - self._save_context["packrat_parse"] = ParserElement._parse - self._save_context[ - "recursion_enabled" - ] = ParserElement._left_recursion_enabled - - self._save_context["__diag__"] = { - name: getattr(__diag__, name) for name in __diag__._all_names - } - - self._save_context["__compat__"] = { - "collect_all_And_tokens": __compat__.collect_all_And_tokens - } - - return self - - def restore(self): - # reset pyparsing global state - if ( - ParserElement.DEFAULT_WHITE_CHARS - != self._save_context["default_whitespace"] - ): - ParserElement.set_default_whitespace_chars( - self._save_context["default_whitespace"] - ) - - ParserElement.verbose_stacktrace = self._save_context["verbose_stacktrace"] - - Keyword.DEFAULT_KEYWORD_CHARS = self._save_context["default_keyword_chars"] - ParserElement.inlineLiteralsUsing( - self._save_context["literal_string_class"] - ) - - for name, value in self._save_context["__diag__"].items(): - (__diag__.enable if value else __diag__.disable)(name) - - ParserElement._packratEnabled = False - if self._save_context["packrat_enabled"]: - ParserElement.enable_packrat(self._save_context["packrat_cache_size"]) - else: - ParserElement._parse = self._save_context["packrat_parse"] - ParserElement._left_recursion_enabled = self._save_context[ - "recursion_enabled" - ] - - __compat__.collect_all_And_tokens = self._save_context["__compat__"] - - return self - - def copy(self): - ret = type(self)() - ret._save_context.update(self._save_context) - return ret - - def __enter__(self): - return self.save() - - def __exit__(self, *args): - self.restore() - - class TestParseResultsAsserts: - """ - A mixin class to add parse results assertion methods to normal unittest.TestCase classes. - """ - - def assertParseResultsEquals( - self, result, expected_list=None, expected_dict=None, msg=None - ): - """ - Unit test assertion to compare a :class:`ParseResults` object with an optional ``expected_list``, - and compare any defined results names with an optional ``expected_dict``. - """ - if expected_list is not None: - self.assertEqual(expected_list, result.as_list(), msg=msg) - if expected_dict is not None: - self.assertEqual(expected_dict, result.as_dict(), msg=msg) - - def assertParseAndCheckList( - self, expr, test_string, expected_list, msg=None, verbose=True - ): - """ - Convenience wrapper assert to test a parser element and input string, and assert that - the resulting ``ParseResults.asList()`` is equal to the ``expected_list``. 
- """ - result = expr.parse_string(test_string, parse_all=True) - if verbose: - print(result.dump()) - else: - print(result.as_list()) - self.assertParseResultsEquals(result, expected_list=expected_list, msg=msg) - - def assertParseAndCheckDict( - self, expr, test_string, expected_dict, msg=None, verbose=True - ): - """ - Convenience wrapper assert to test a parser element and input string, and assert that - the resulting ``ParseResults.asDict()`` is equal to the ``expected_dict``. - """ - result = expr.parse_string(test_string, parseAll=True) - if verbose: - print(result.dump()) - else: - print(result.as_list()) - self.assertParseResultsEquals(result, expected_dict=expected_dict, msg=msg) - - def assertRunTestResults( - self, run_tests_report, expected_parse_results=None, msg=None - ): - """ - Unit test assertion to evaluate output of ``ParserElement.runTests()``. If a list of - list-dict tuples is given as the ``expected_parse_results`` argument, then these are zipped - with the report tuples returned by ``runTests`` and evaluated using ``assertParseResultsEquals``. - Finally, asserts that the overall ``runTests()`` success value is ``True``. - - :param run_tests_report: tuple(bool, [tuple(str, ParseResults or Exception)]) returned from runTests - :param expected_parse_results (optional): [tuple(str, list, dict, Exception)] - """ - run_test_success, run_test_results = run_tests_report - - if expected_parse_results is not None: - merged = [ - (*rpt, expected) - for rpt, expected in zip(run_test_results, expected_parse_results) - ] - for test_string, result, expected in merged: - # expected should be a tuple containing a list and/or a dict or an exception, - # and optional failure message string - # an empty tuple will skip any result validation - fail_msg = next( - (exp for exp in expected if isinstance(exp, str)), None - ) - expected_exception = next( - ( - exp - for exp in expected - if isinstance(exp, type) and issubclass(exp, Exception) - ), - None, - ) - if expected_exception is not None: - with self.assertRaises( - expected_exception=expected_exception, msg=fail_msg or msg - ): - if isinstance(result, Exception): - raise result - else: - expected_list = next( - (exp for exp in expected if isinstance(exp, list)), None - ) - expected_dict = next( - (exp for exp in expected if isinstance(exp, dict)), None - ) - if (expected_list, expected_dict) != (None, None): - self.assertParseResultsEquals( - result, - expected_list=expected_list, - expected_dict=expected_dict, - msg=fail_msg or msg, - ) - else: - # warning here maybe? - print("no validation for {!r}".format(test_string)) - - # do this last, in case some specific test results can be reported instead - self.assertTrue( - run_test_success, msg=msg if msg is not None else "failed runTests" - ) - - @contextmanager - def assertRaisesParseException(self, exc_type=ParseException, msg=None): - with self.assertRaises(exc_type, msg=msg): - yield - - @staticmethod - def with_line_numbers( - s: str, - start_line: typing.Optional[int] = None, - end_line: typing.Optional[int] = None, - expand_tabs: bool = True, - eol_mark: str = "|", - mark_spaces: typing.Optional[str] = None, - mark_control: typing.Optional[str] = None, - ) -> str: - """ - Helpful method for debugging a parser - prints a string with line and column numbers. - (Line and column numbers are 1-based.) 
- - :param s: tuple(bool, str - string to be printed with line and column numbers - :param start_line: int - (optional) starting line number in s to print (default=1) - :param end_line: int - (optional) ending line number in s to print (default=len(s)) - :param expand_tabs: bool - (optional) expand tabs to spaces, to match the pyparsing default - :param eol_mark: str - (optional) string to mark the end of lines, helps visualize trailing spaces (default="|") - :param mark_spaces: str - (optional) special character to display in place of spaces - :param mark_control: str - (optional) convert non-printing control characters to a placeholding - character; valid values: - - "unicode" - replaces control chars with Unicode symbols, such as "␍" and "␊" - - any single character string - replace control characters with given string - - None (default) - string is displayed as-is - - :return: str - input string with leading line numbers and column number headers - """ - if expand_tabs: - s = s.expandtabs() - if mark_control is not None: - if mark_control == "unicode": - tbl = str.maketrans( - {c: u for c, u in zip(range(0, 33), range(0x2400, 0x2433))} - | {127: 0x2421} - ) - eol_mark = "" - else: - tbl = str.maketrans( - {c: mark_control for c in list(range(0, 32)) + [127]} - ) - s = s.translate(tbl) - if mark_spaces is not None and mark_spaces != " ": - if mark_spaces == "unicode": - tbl = str.maketrans({9: 0x2409, 32: 0x2423}) - s = s.translate(tbl) - else: - s = s.replace(" ", mark_spaces) - if start_line is None: - start_line = 1 - if end_line is None: - end_line = len(s) - end_line = min(end_line, len(s)) - start_line = min(max(1, start_line), end_line) - - if mark_control != "unicode": - s_lines = s.splitlines()[start_line - 1 : end_line] - else: - s_lines = [line + "␊" for line in s.split("␊")[start_line - 1 : end_line]] - if not s_lines: - return "" - - lineno_width = len(str(end_line)) - max_line_len = max(len(line) for line in s_lines) - lead = " " * (lineno_width + 1) - if max_line_len >= 99: - header0 = ( - lead - + "".join( - "{}{}".format(" " * 99, (i + 1) % 100) - for i in range(max(max_line_len // 100, 1)) - ) - + "\n" - ) - else: - header0 = "" - header1 = ( - header0 - + lead - + "".join( - " {}".format((i + 1) % 10) - for i in range(-(-max_line_len // 10)) - ) - + "\n" - ) - header2 = lead + "1234567890" * (-(-max_line_len // 10)) + "\n" - return ( - header1 - + header2 - + "\n".join( - "{:{}d}:{}{}".format(i, lineno_width, line, eol_mark) - for i, line in enumerate(s_lines, start=start_line) - ) - + "\n" - ) diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/transforms/__init__.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/transforms/__init__.py deleted file mode 100644 index ab3c63b5b456a7fb878757e25768a3634f76ae5b..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/transforms/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-from fvcore.transforms.transform import Transform, TransformList # order them first -from fvcore.transforms.transform import * -from .transform import * -from .augmentation import * -from .augmentation_impl import * - -__all__ = [k for k in globals().keys() if not k.startswith("_")] - - -from detectron2.utils.env import fixup_module_metadata - -fixup_module_metadata(__name__, globals(), __all__) -del fixup_module_metadata diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/structures/image_list.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/structures/image_list.py deleted file mode 100644 index b31b2d395dffb9d3694239d1aa73615899975f4e..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/structures/image_list.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -from __future__ import division -from typing import Any, List, Tuple -import torch -from torch import device -from torch.nn import functional as F - -from detectron2.layers.wrappers import shapes_to_tensor - - -class ImageList(object): - """ - Structure that holds a list of images (of possibly - varying sizes) as a single tensor. - This works by padding the images to the same size. - The original sizes of each image is stored in `image_sizes`. - - Attributes: - image_sizes (list[tuple[int, int]]): each tuple is (h, w). - During tracing, it becomes list[Tensor] instead. - """ - - def __init__(self, tensor: torch.Tensor, image_sizes: List[Tuple[int, int]]): - """ - Arguments: - tensor (Tensor): of shape (N, H, W) or (N, C_1, ..., C_K, H, W) where K >= 1 - image_sizes (list[tuple[int, int]]): Each tuple is (h, w). It can - be smaller than (H, W) due to padding. - """ - self.tensor = tensor - self.image_sizes = image_sizes - - def __len__(self) -> int: - return len(self.image_sizes) - - def __getitem__(self, idx) -> torch.Tensor: - """ - Access the individual image in its original size. - - Args: - idx: int or slice - - Returns: - Tensor: an image of shape (H, W) or (C_1, ..., C_K, H, W) where K >= 1 - """ - size = self.image_sizes[idx] - return self.tensor[idx, ..., : size[0], : size[1]] - - @torch.jit.unused - def to(self, *args: Any, **kwargs: Any) -> "ImageList": - cast_tensor = self.tensor.to(*args, **kwargs) - return ImageList(cast_tensor, self.image_sizes) - - @property - def device(self) -> device: - return self.tensor.device - - @staticmethod - def from_tensors( - tensors: List[torch.Tensor], size_divisibility: int = 0, pad_value: float = 0.0 - ) -> "ImageList": - """ - Args: - tensors: a tuple or list of `torch.Tensor`, each of shape (Hi, Wi) or - (C_1, ..., C_K, Hi, Wi) where K >= 1. The Tensors will be padded - to the same shape with `pad_value`. - size_divisibility (int): If `size_divisibility > 0`, add padding to ensure - the common height and width is divisible by `size_divisibility`. - This depends on the model and many models need a divisibility of 32. - pad_value (float): value to pad - - Returns: - an `ImageList`. 
- """ - assert len(tensors) > 0 - assert isinstance(tensors, (tuple, list)) - for t in tensors: - assert isinstance(t, torch.Tensor), type(t) - assert t.shape[:-2] == tensors[0].shape[:-2], t.shape - - image_sizes = [(im.shape[-2], im.shape[-1]) for im in tensors] - image_sizes_tensor = [shapes_to_tensor(x) for x in image_sizes] - max_size = torch.stack(image_sizes_tensor).max(0).values - - if size_divisibility > 1: - stride = size_divisibility - # the last two dims are H,W, both subject to divisibility requirement - max_size = (max_size + (stride - 1)).div(stride, rounding_mode="floor") * stride - - # handle weirdness of scripting and tracing ... - if torch.jit.is_scripting(): - max_size: List[int] = max_size.to(dtype=torch.long).tolist() - else: - if torch.jit.is_tracing(): - image_sizes = image_sizes_tensor - - if len(tensors) == 1: - # This seems slightly (2%) faster. - # TODO: check whether it's faster for multiple images as well - image_size = image_sizes[0] - padding_size = [0, max_size[-1] - image_size[1], 0, max_size[-2] - image_size[0]] - batched_imgs = F.pad(tensors[0], padding_size, value=pad_value).unsqueeze_(0) - else: - # max_size can be a tensor in tracing mode, therefore convert to list - batch_shape = [len(tensors)] + list(tensors[0].shape[:-2]) + list(max_size) - batched_imgs = tensors[0].new_full(batch_shape, pad_value) - for img, pad_img in zip(tensors, batched_imgs): - pad_img[..., : img.shape[-2], : img.shape[-1]].copy_(img) - - return ImageList(batched_imgs.contiguous(), image_sizes) diff --git a/spaces/BatuhanYilmaz/Youtube-Transcriber/utils.py b/spaces/BatuhanYilmaz/Youtube-Transcriber/utils.py deleted file mode 100644 index 17252fa2ac5ac9e64887184574561fb0f340545a..0000000000000000000000000000000000000000 --- a/spaces/BatuhanYilmaz/Youtube-Transcriber/utils.py +++ /dev/null @@ -1,115 +0,0 @@ -import textwrap -import unicodedata -import re - -import zlib -from typing import Iterator, TextIO - - -def exact_div(x, y): - assert x % y == 0 - return x // y - - -def str2bool(string): - str2val = {"True": True, "False": False} - if string in str2val: - return str2val[string] - else: - raise ValueError(f"Expected one of {set(str2val.keys())}, got {string}") - - -def optional_int(string): - return None if string == "None" else int(string) - - -def optional_float(string): - return None if string == "None" else float(string) - - -def compression_ratio(text) -> float: - return len(text) / len(zlib.compress(text.encode("utf-8"))) - - -def format_timestamp(seconds: float, always_include_hours: bool = False, fractionalSeperator: str = '.'): - assert seconds >= 0, "non-negative timestamp expected" - milliseconds = round(seconds * 1000.0) - - hours = milliseconds // 3_600_000 - milliseconds -= hours * 3_600_000 - - minutes = milliseconds // 60_000 - milliseconds -= minutes * 60_000 - - seconds = milliseconds // 1_000 - milliseconds -= seconds * 1_000 - - hours_marker = f"{hours:02d}:" if always_include_hours or hours > 0 else "" - return f"{hours_marker}{minutes:02d}:{seconds:02d}{fractionalSeperator}{milliseconds:03d}" - - -def write_txt(transcript: Iterator[dict], file: TextIO): - for segment in transcript: - print(segment['text'].strip(), file=file, flush=True) - - -def write_vtt(transcript: Iterator[dict], file: TextIO, maxLineWidth=None): - print("WEBVTT\n", file=file) - for segment in transcript: - text = processText(segment['text'], maxLineWidth).replace('-->', '->') - - print( - f"{format_timestamp(segment['start'])} --> {format_timestamp(segment['end'])}\n" - f"{text}\n", 
- file=file, - flush=True, - ) - - -def write_srt(transcript: Iterator[dict], file: TextIO, maxLineWidth=None): - """ - Write a transcript to a file in SRT format. - Example usage: - from pathlib import Path - from whisper.utils import write_srt - result = transcribe(model, audio_path, temperature=temperature, **args) - # save SRT - audio_basename = Path(audio_path).stem - with open(Path(output_dir) / (audio_basename + ".srt"), "w", encoding="utf-8") as srt: - write_srt(result["segments"], file=srt) - """ - for i, segment in enumerate(transcript, start=1): - text = processText(segment['text'].strip(), maxLineWidth).replace('-->', '->') - - # write srt lines - print( - f"{i}\n" - f"{format_timestamp(segment['start'], always_include_hours=True, fractionalSeperator=',')} --> " - f"{format_timestamp(segment['end'], always_include_hours=True, fractionalSeperator=',')}\n" - f"{text}\n", - file=file, - flush=True, - ) - -def processText(text: str, maxLineWidth=None): - if (maxLineWidth is None or maxLineWidth < 0): - return text - - lines = textwrap.wrap(text, width=maxLineWidth, tabsize=4) - return '\n'.join(lines) - -def slugify(value, allow_unicode=False): - """ - Taken from https://github.com/django/django/blob/master/django/utils/text.py - Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated - dashes to single dashes. Remove characters that aren't alphanumerics, - underscores, or hyphens. Convert to lowercase. Also strip leading and - trailing whitespace, dashes, and underscores. - """ - value = str(value) - if allow_unicode: - value = unicodedata.normalize('NFKC', value) - else: - value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii') - value = re.sub(r'[^\w\s-]', '', value.lower()) - return re.sub(r'[-\s]+', '-', value).strip('-_') \ No newline at end of file diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/docs/waiter.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/docs/waiter.py deleted file mode 100644 index c5226d460e63b76ca11f878cf36747bcee427477..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/docs/waiter.py +++ /dev/null @@ -1,184 +0,0 @@ -# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"). You -# may not use this file except in compliance with the License. A copy of -# the License is located at -# -# http://aws.amazon.com/apache2.0/ -# -# or in the "license" file accompanying this file. This file is -# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF -# ANY KIND, either express or implied. See the License for the specific -# language governing permissions and limitations under the License. 
-import os - -from botocore import xform_name -from botocore.compat import OrderedDict -from botocore.docs.bcdoc.restdoc import DocumentStructure -from botocore.docs.method import document_model_driven_method -from botocore.docs.utils import DocumentedShape -from botocore.utils import get_service_module_name - - -class WaiterDocumenter: - def __init__(self, client, service_waiter_model, root_docs_path): - self._client = client - self._client_class_name = self._client.__class__.__name__ - self._service_name = self._client.meta.service_model.service_name - self._service_waiter_model = service_waiter_model - self._root_docs_path = root_docs_path - self._USER_GUIDE_LINK = ( - 'https://boto3.amazonaws.com/' - 'v1/documentation/api/latest/guide/clients.html#waiters' - ) - - def document_waiters(self, section): - """Documents the various waiters for a service. - - :param section: The section to write to. - """ - section.style.h2('Waiters') - self._add_overview(section) - section.style.new_line() - section.writeln('The available waiters are:') - section.style.toctree() - for waiter_name in self._service_waiter_model.waiter_names: - section.style.tocitem(f'{self._service_name}/waiter/{waiter_name}') - # Create a new DocumentStructure for each waiter and add contents. - waiter_doc_structure = DocumentStructure( - waiter_name, target='html' - ) - self._add_single_waiter(waiter_doc_structure, waiter_name) - # Write waiters in individual/nested files. - # Path: /reference/services//waiter/.rst - waiter_dir_path = os.path.join( - self._root_docs_path, self._service_name, 'waiter' - ) - waiter_doc_structure.write_to_file(waiter_dir_path, waiter_name) - - def _add_single_waiter(self, section, waiter_name): - breadcrumb_section = section.add_new_section('breadcrumb') - breadcrumb_section.style.ref( - self._client_class_name, f'../../{self._service_name}' - ) - breadcrumb_section.write(f' / Waiter / {waiter_name}') - section.add_title_section(waiter_name) - waiter_section = section.add_new_section(waiter_name) - waiter_section.style.start_sphinx_py_class( - class_name=f"{self._client_class_name}.Waiter.{waiter_name}" - ) - - # Add example on how to instantiate waiter. - waiter_section.style.start_codeblock() - waiter_section.style.new_line() - waiter_section.write( - 'waiter = client.get_waiter(\'%s\')' % xform_name(waiter_name) - ) - waiter_section.style.end_codeblock() - - # Add information on the wait() method - waiter_section.style.new_line() - document_wait_method( - section=waiter_section, - waiter_name=waiter_name, - event_emitter=self._client.meta.events, - service_model=self._client.meta.service_model, - service_waiter_model=self._service_waiter_model, - ) - - def _add_overview(self, section): - section.style.new_line() - section.write( - 'Waiters are available on a client instance ' - 'via the ``get_waiter`` method. 
For more detailed instructions ' - 'and examples on the usage or waiters, see the ' - 'waiters ' - ) - section.style.external_link( - title='user guide', - link=self._USER_GUIDE_LINK, - ) - section.write('.') - section.style.new_line() - - -def document_wait_method( - section, - waiter_name, - event_emitter, - service_model, - service_waiter_model, - include_signature=True, -): - """Documents a the wait method of a waiter - - :param section: The section to write to - - :param waiter_name: The name of the waiter - - :param event_emitter: The event emitter to use to emit events - - :param service_model: The service model - - :param service_waiter_model: The waiter model associated to the service - - :param include_signature: Whether or not to include the signature. - It is useful for generating docstrings. - """ - waiter_model = service_waiter_model.get_waiter(waiter_name) - operation_model = service_model.operation_model(waiter_model.operation) - - waiter_config_members = OrderedDict() - - waiter_config_members['Delay'] = DocumentedShape( - name='Delay', - type_name='integer', - documentation=( - '

<p>The amount of time in seconds to wait between ' - 'attempts. Default: {}</p>'.format(waiter_model.delay) - ), - ) - - waiter_config_members['MaxAttempts'] = DocumentedShape( - name='MaxAttempts', - type_name='integer', - documentation=( - '<p>The maximum number of attempts to be made. ' - 'Default: {}</p>'.format(waiter_model.max_attempts) - ), - ) - - botocore_waiter_params = [ - DocumentedShape( - name='WaiterConfig', - type_name='structure', - documentation=( - '<p>A dictionary that provides parameters to control ' - 'waiting behavior.</p>
    ' - ), - members=waiter_config_members, - ) - ] - - wait_description = ( - 'Polls :py:meth:`{}.Client.{}` every {} ' - 'seconds until a successful state is reached. An error is ' - 'returned after {} failed checks.'.format( - get_service_module_name(service_model), - xform_name(waiter_model.operation), - waiter_model.delay, - waiter_model.max_attempts, - ) - ) - - document_model_driven_method( - section, - 'wait', - operation_model, - event_emitter=event_emitter, - method_description=wait_description, - example_prefix='waiter.wait', - include_input=botocore_waiter_params, - document_output=False, - include_signature=include_signature, - ) diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/euckrprober.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/euckrprober.py deleted file mode 100644 index 1fc5de0462cd9a09472cece4087cafe699da4fa7..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/euckrprober.py +++ /dev/null @@ -1,47 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is mozilla.org code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from .chardistribution import EUCKRDistributionAnalysis -from .codingstatemachine import CodingStateMachine -from .mbcharsetprober import MultiByteCharSetProber -from .mbcssm import EUCKR_SM_MODEL - - -class EUCKRProber(MultiByteCharSetProber): - def __init__(self) -> None: - super().__init__() - self.coding_sm = CodingStateMachine(EUCKR_SM_MODEL) - self.distribution_analyzer = EUCKRDistributionAnalysis() - self.reset() - - @property - def charset_name(self) -> str: - return "EUC-KR" - - @property - def language(self) -> str: - return "Korean" diff --git a/spaces/CVPR/LIVE/thrust/dependencies/cub/tune/Makefile b/spaces/CVPR/LIVE/thrust/dependencies/cub/tune/Makefile deleted file mode 100644 index 926b340fe4af77d77663281c5874e11fe3a41be4..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/dependencies/cub/tune/Makefile +++ /dev/null @@ -1,192 +0,0 @@ -#/****************************************************************************** -# * Copyright (c) 2011, Duane Merrill. All rights reserved. -# * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. 
-# * -# * Redistribution and use in source and binary forms, with or without -# * modification, are permitted provided that the following conditions are met: -# * * Redistributions of source code must retain the above copyright -# * notice, this list of conditions and the following disclaimer. -# * * Redistributions in binary form must reproduce the above copyright -# * notice, this list of conditions and the following disclaimer in the -# * documentation and/or other materials provided with the distribution. -# * * Neither the name of the NVIDIA CORPORATION nor the -# * names of its contributors may be used to endorse or promote products -# * derived from this software without specific prior written permission. -# * -# * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -# * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY -# * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -# * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -# * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -# * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# * -#******************************************************************************/ - -#------------------------------------------------------------------------------- -# Build script for project -#------------------------------------------------------------------------------- - -NVCC = "$(shell which nvcc)" -NVCC_VERSION = $(strip $(shell nvcc --version | grep release | sed 's/.*release //' | sed 's/,.*//')) - -# detect OS -OSUPPER = $(shell uname -s 2>/dev/null | tr [:lower:] [:upper:]) - -#------------------------------------------------------------------------------- -# Libs -#------------------------------------------------------------------------------- - - -#------------------------------------------------------------------------------- -# Includes -#------------------------------------------------------------------------------- - -INC = -I. -I.. 
-I../test - -#------------------------------------------------------------------------------- -# Libs -#------------------------------------------------------------------------------- - -LIBS += -lcudart - -#------------------------------------------------------------------------------- -# Defines -#------------------------------------------------------------------------------- - -DEFINES = - -#------------------------------------------------------------------------------- -# SM Arch -#------------------------------------------------------------------------------- - -ifdef sm - SM_ARCH = $(sm) -else - SM_ARCH = 200 -endif - -# Only one arch per tuning binary -ifeq (350, $(findstring 350, $(SM_ARCH))) - SM_TARGETS = -arch=sm_35 - SM_ARCH = 350 -endif -ifeq (300, $(findstring 300, $(SM_ARCH))) - SM_TARGETS = -arch=sm_30 - SM_ARCH = 300 -endif -ifeq (200, $(findstring 200, $(SM_ARCH))) - SM_TARGETS = -arch=sm_20 - SM_ARCH = 200 -endif -ifeq (130, $(findstring 130, $(SM_ARCH))) - SM_TARGETS = -arch=sm_13 - SM_ARCH = 130 -endif -ifeq (110, $(findstring 110, $(SM_ARCH))) - SM_TARGETS = -arch=sm_11 - SM_ARCH = 110 -endif -ifeq (100, $(findstring 100, $(SM_ARCH))) - SM_TARGETS = -arch=sm_10 - SM_ARCH = 100 -endif - - -#------------------------------------------------------------------------------- -# Compiler Flags -#------------------------------------------------------------------------------- - -NVCCFLAGS = -Xptxas -v -Xcudafe -\# - -# Help the compiler/linker work with huge numbers of kernels on Windows -ifeq (WIN_NT, $(findstring WIN_NT, $(OSUPPER))) - NVCCFLAGS += -Xcompiler /bigobj -Xcompiler /Zm500 -endif - -# 32/64-bit (32-bit device pointers by default) -ifeq ($(force32), 1) - CPU_ARCH = -m32 - CPU_ARCH_SUFFIX = i386 -else - CPU_ARCH = -m64 - CPU_ARCH_SUFFIX = x86_64 -endif - -# CUDA ABI enable/disable (enabled by default) -ifneq ($(abi), 0) - ABI_SUFFIX = abi -else - NVCCFLAGS += -Xptxas -abi=no - ABI_SUFFIX = noabi -endif - -# NVVM/Open64 middle-end compiler (nvvm by default) -ifeq ($(open64), 1) - NVCCFLAGS += -open64 - PTX_SUFFIX = open64 -else - PTX_SUFFIX = nvvm -endif - -# Verbose toolchain output from nvcc -ifeq ($(verbose), 1) - NVCCFLAGS += -v -endif - -# Keep intermediate compilation artifacts -ifeq ($(keep), 1) - NVCCFLAGS += -keep -endif - -# Data type size to compile a schmoo binary for -ifdef tunesize - TUNE_SIZE = $(tunesize) -else - TUNE_SIZE = 4 -endif - - -SUFFIX = $(TUNE_SIZE)B_sm$(SM_ARCH)_$(PTX_SUFFIX)_$(NVCC_VERSION)_$(ABI_SUFFIX)_$(CPU_ARCH_SUFFIX) - -#------------------------------------------------------------------------------- -# Dependency Lists -#------------------------------------------------------------------------------- - -rwildcard=$(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) $(filter $(subst *,%,$2),$d)) - -DEPS = ./Makefile \ - ../test/test_util.h \ - $(call rwildcard,../cub/,*.cuh) - - -#------------------------------------------------------------------------------- -# make default -#------------------------------------------------------------------------------- - -default: - - -#------------------------------------------------------------------------------- -# make clean -#------------------------------------------------------------------------------- - -clean : - rm -f bin/*$(CPU_ARCH_SUFFIX)* - rm -f *.i* *.cubin *.cu.c *.cudafe* *.fatbin.c *.ptx *.hash *.cu.cpp *.o - - - -#------------------------------------------------------------------------------- -# make tune_device_reduce 
-#------------------------------------------------------------------------------- - -tune_device_reduce: bin/tune_device_reduce_$(SUFFIX) - -bin/tune_device_reduce_$(SUFFIX) : tune_device_reduce.cu $(DEPS) - mkdir -p bin - $(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/tune_device_reduce_$(SUFFIX) tune_device_reduce.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3 -DTUNE_ARCH=$(SM_ARCH) -DTUNE_SIZE=$(TUNE_SIZE) - diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/generate.h b/spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/generate.h deleted file mode 100644 index 5b1d7b4bac5f24a40072e461d9bce530dfa12319..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/generate.h +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a fill of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include - -// the purpose of this header is to #include the generate.h header -// of the sequential, host, and device systems. It should be #included in any -// code which uses adl to dispatch generate - -#include - -// SCons can't see through the #defines below to figure out what this header -// includes, so we fake it out by specifying all possible files we might end up -// including inside an #if 0. -#if 0 -#include -#include -#include -#include -#endif - -#define __THRUST_HOST_SYSTEM_GENERATE_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/generate.h> -#include __THRUST_HOST_SYSTEM_GENERATE_HEADER -#undef __THRUST_HOST_SYSTEM_GENERATE_HEADER - -#define __THRUST_DEVICE_SYSTEM_GENERATE_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/generate.h> -#include __THRUST_DEVICE_SYSTEM_GENERATE_HEADER -#undef __THRUST_DEVICE_SYSTEM_GENERATE_HEADER - diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/copy.h b/spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/copy.h deleted file mode 100644 index 7977768b02be1812799733462f1a162632a9c53f..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/copy.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#pragma once - -#include -#include - -namespace thrust -{ -namespace system -{ -namespace tbb -{ -namespace detail -{ - - -template -OutputIterator copy(execution_policy &exec, - InputIterator first, - InputIterator last, - OutputIterator result); - - -template -OutputIterator copy_n(execution_policy &exec, - InputIterator first, - Size n, - OutputIterator result); - - -} // end namespace detail -} // end namespace tbb -} // end namespace system -} // end namespace thrust - -#include - diff --git a/spaces/CVPR/Text2Human/Text2Human/data/segm_attr_dataset.py b/spaces/CVPR/Text2Human/Text2Human/data/segm_attr_dataset.py deleted file mode 100644 index 2ab45cb71bce2f20e703f8293a7f2b430c1aaa4e..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Text2Human/Text2Human/data/segm_attr_dataset.py +++ /dev/null @@ -1,167 +0,0 @@ -import os -import os.path -import random - -import numpy as np -import torch -import torch.utils.data as data -from PIL import Image - - -class DeepFashionAttrSegmDataset(data.Dataset): - - def __init__(self, - img_dir, - segm_dir, - pose_dir, - ann_dir, - downsample_factor=2, - xflip=False): - self._img_path = img_dir - self._densepose_path = pose_dir - self._segm_path = segm_dir - self._image_fnames = [] - self.upper_fused_attrs = [] - self.lower_fused_attrs = [] - self.outer_fused_attrs = [] - - self.downsample_factor = downsample_factor - self.xflip = xflip - - # load attributes - assert os.path.exists(f'{ann_dir}/upper_fused.txt') - for idx, row in enumerate( - open(os.path.join(f'{ann_dir}/upper_fused.txt'), 'r')): - annotations = row.split() - self._image_fnames.append(annotations[0]) - # assert self._image_fnames[idx] == annotations[0] - self.upper_fused_attrs.append(int(annotations[1])) - - assert len(self._image_fnames) == len(self.upper_fused_attrs) - - assert os.path.exists(f'{ann_dir}/lower_fused.txt') - for idx, row in enumerate( - open(os.path.join(f'{ann_dir}/lower_fused.txt'), 'r')): - annotations = row.split() - assert self._image_fnames[idx] == annotations[0] - self.lower_fused_attrs.append(int(annotations[1])) - - assert len(self._image_fnames) == len(self.lower_fused_attrs) - - assert os.path.exists(f'{ann_dir}/outer_fused.txt') - for idx, row in enumerate( - open(os.path.join(f'{ann_dir}/outer_fused.txt'), 'r')): - annotations = row.split() - assert self._image_fnames[idx] == annotations[0] - self.outer_fused_attrs.append(int(annotations[1])) - - assert len(self._image_fnames) == len(self.outer_fused_attrs) - - # remove the overlapping item between upper cls and lower cls - # cls 21 can appear with upper clothes - # cls 4 can appear with lower clothes - self.upper_cls = [1., 4.] - self.lower_cls = [3., 5., 21.] - self.outer_cls = [2.] - self.other_cls = [ - 11., 18., 7., 8., 9., 10., 12., 16., 17., 19., 20., 22., 23., 15., - 14., 13., 0., 6. 
- ] - - def _open_file(self, path_prefix, fname): - return open(os.path.join(path_prefix, fname), 'rb') - - def _load_raw_image(self, raw_idx): - fname = self._image_fnames[raw_idx] - with self._open_file(self._img_path, fname) as f: - image = Image.open(f) - if self.downsample_factor != 1: - width, height = image.size - width = width // self.downsample_factor - height = height // self.downsample_factor - image = image.resize( - size=(width, height), resample=Image.LANCZOS) - image = np.array(image) - if image.ndim == 2: - image = image[:, :, np.newaxis] # HW => HWC - image = image.transpose(2, 0, 1) # HWC => CHW - return image - - def _load_densepose(self, raw_idx): - fname = self._image_fnames[raw_idx] - fname = f'{fname[:-4]}_densepose.png' - with self._open_file(self._densepose_path, fname) as f: - densepose = Image.open(f) - if self.downsample_factor != 1: - width, height = densepose.size - width = width // self.downsample_factor - height = height // self.downsample_factor - densepose = densepose.resize( - size=(width, height), resample=Image.NEAREST) - # channel-wise IUV order, [3, H, W] - densepose = np.array(densepose)[:, :, 2:].transpose(2, 0, 1) - return densepose.astype(np.float32) - - def _load_segm(self, raw_idx): - fname = self._image_fnames[raw_idx] - fname = f'{fname[:-4]}_segm.png' - with self._open_file(self._segm_path, fname) as f: - segm = Image.open(f) - if self.downsample_factor != 1: - width, height = segm.size - width = width // self.downsample_factor - height = height // self.downsample_factor - segm = segm.resize( - size=(width, height), resample=Image.NEAREST) - segm = np.array(segm) - segm = segm[:, :, np.newaxis].transpose(2, 0, 1) - return segm.astype(np.float32) - - def __getitem__(self, index): - image = self._load_raw_image(index) - pose = self._load_densepose(index) - segm = self._load_segm(index) - - if self.xflip and random.random() > 0.5: - assert image.ndim == 3 # CHW - image = image[:, :, ::-1].copy() - pose = pose[:, :, ::-1].copy() - segm = segm[:, :, ::-1].copy() - - image = torch.from_numpy(image) - segm = torch.from_numpy(segm) - - upper_fused_attr = self.upper_fused_attrs[index] - lower_fused_attr = self.lower_fused_attrs[index] - outer_fused_attr = self.outer_fused_attrs[index] - - # mask 0: denotes the common codebook, - # mask (attr + 1): denotes the texture-specific codebook - mask = torch.zeros_like(segm) - if upper_fused_attr != 17: - for cls in self.upper_cls: - mask[segm == cls] = upper_fused_attr + 1 - - if lower_fused_attr != 17: - for cls in self.lower_cls: - mask[segm == cls] = lower_fused_attr + 1 - - if outer_fused_attr != 17: - for cls in self.outer_cls: - mask[segm == cls] = outer_fused_attr + 1 - - pose = pose / 12. - 1 - image = image / 127.5 - 1 - - return_dict = { - 'image': image, - 'densepose': pose, - 'segm': segm, - 'texture_mask': mask, - 'img_name': self._image_fnames[index] - } - - return return_dict - - def __len__(self): - return len(self._image_fnames) diff --git a/spaces/CVPR/regionclip-demo/detectron2/data/datasets/builtin_meta.py b/spaces/CVPR/regionclip-demo/detectron2/data/datasets/builtin_meta.py deleted file mode 100644 index 68b325f4c0326196736d39d6214898e38576e3c0..0000000000000000000000000000000000000000 --- a/spaces/CVPR/regionclip-demo/detectron2/data/datasets/builtin_meta.py +++ /dev/null @@ -1,560 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) Facebook, Inc. and its affiliates. - -""" -Note: -For your custom dataset, there is no need to hard-code metadata anywhere in the code. 
-For example, for COCO-format dataset, metadata will be obtained automatically -when calling `load_coco_json`. For other dataset, metadata may also be obtained in other ways -during loading. - -However, we hard-coded metadata for a few common dataset here. -The only goal is to allow users who don't have these dataset to use pre-trained models. -Users don't have to download a COCO json (which contains metadata), in order to visualize a -COCO model (with correct class names and colors). -""" -# meta data for 65-48-17 zeroshot split of COCO -COCO_OVD_CATEGORIES = { - 'target': [ - {"color": [106, 0, 228], "isthing": 1, "id": 5, "name": "airplane"}, - {"color": [0, 60, 100], "isthing": 1, "id": 6, "name": "bus"}, - {"color": [255, 77, 255], "isthing": 1, "id": 17, "name": "cat"}, - {"color": [0, 226, 252], "isthing": 1, "id": 18, "name": "dog"}, - {"color": [120, 166, 157], "isthing": 1, "id": 21, "name": "cow"}, - {"color": [110, 76, 0], "isthing": 1, "id": 22, "name": "elephant"}, - {"color": [0, 125, 92], "isthing": 1, "id": 28, "name": "umbrella"}, - {"color": [188, 208, 182], "isthing": 1, "id": 32, "name": "tie"}, - {"color": [133, 129, 255], "isthing": 1, "id": 36, "name": "snowboard"}, - {"color": [134, 134, 103], "isthing": 1, "id": 41, "name": "skateboard"}, - {"color": [109, 63, 54], "isthing": 1, "id": 47, "name": "cup"}, - {"color": [151, 0, 95], "isthing": 1, "id": 49, "name": "knife"}, - {"color": [147, 186, 208], "isthing": 1, "id": 61, "name": "cake"}, - {"color": [3, 95, 161], "isthing": 1, "id": 63, "name": "couch"}, - {"color": [166, 74, 118], "isthing": 1, "id": 76, "name": "keyboard"}, - {"color": [127, 167, 115], "isthing": 1, "id": 81, "name": "sink"}, - {"color": [128, 76, 255], "isthing": 1, "id": 87, "name": "scissors"}, - ], - 'base': [ - {"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"}, - {"color": [119, 11, 32], "isthing": 1, "id": 2, "name": "bicycle"}, - {"color": [0, 0, 142], "isthing": 1, "id": 3, "name": "car"}, - {"color": [0, 0, 230], "isthing": 1, "id": 4, "name": "motorcycle"}, - {"color": [0, 80, 100], "isthing": 1, "id": 7, "name": "train"}, - {"color": [0, 0, 70], "isthing": 1, "id": 8, "name": "truck"}, - {"color": [0, 0, 192], "isthing": 1, "id": 9, "name": "boat"}, - {"color": [250, 0, 30], "isthing": 1, "id": 15, "name": "bench"}, - {"color": [165, 42, 42], "isthing": 1, "id": 16, "name": "bird"}, - {"color": [182, 182, 255], "isthing": 1, "id": 19, "name": "horse"}, - {"color": [0, 82, 0], "isthing": 1, "id": 20, "name": "sheep"}, - {"color": [174, 57, 255], "isthing": 1, "id": 23, "name": "bear"}, - {"color": [199, 100, 0], "isthing": 1, "id": 24, "name": "zebra"}, - {"color": [72, 0, 118], "isthing": 1, "id": 25, "name": "giraffe"}, - {"color": [255, 179, 240], "isthing": 1, "id": 27, "name": "backpack"}, - {"color": [209, 0, 151], "isthing": 1, "id": 31, "name": "handbag"}, - {"color": [0, 220, 176], "isthing": 1, "id": 33, "name": "suitcase"}, - {"color": [255, 99, 164], "isthing": 1, "id": 34, "name": "frisbee"}, - {"color": [92, 0, 73], "isthing": 1, "id": 35, "name": "skis"}, - {"color": [0, 228, 0], "isthing": 1, "id": 38, "name": "kite"}, - {"color": [145, 148, 174], "isthing": 1, "id": 42, "name": "surfboard"}, - {"color": [197, 226, 255], "isthing": 1, "id": 44, "name": "bottle"}, - {"color": [207, 138, 255], "isthing": 1, "id": 48, "name": "fork"}, - {"color": [9, 80, 61], "isthing": 1, "id": 50, "name": "spoon"}, - {"color": [84, 105, 51], "isthing": 1, "id": 51, "name": "bowl"}, - {"color": [74, 65, 105], "isthing": 
1, "id": 52, "name": "banana"}, - {"color": [166, 196, 102], "isthing": 1, "id": 53, "name": "apple"}, - {"color": [208, 195, 210], "isthing": 1, "id": 54, "name": "sandwich"}, - {"color": [255, 109, 65], "isthing": 1, "id": 55, "name": "orange"}, - {"color": [0, 143, 149], "isthing": 1, "id": 56, "name": "broccoli"}, - {"color": [179, 0, 194], "isthing": 1, "id": 57, "name": "carrot"}, - {"color": [5, 121, 0], "isthing": 1, "id": 59, "name": "pizza"}, - {"color": [227, 255, 205], "isthing": 1, "id": 60, "name": "donut"}, - {"color": [153, 69, 1], "isthing": 1, "id": 62, "name": "chair"}, - {"color": [119, 0, 170], "isthing": 1, "id": 65, "name": "bed"}, - {"color": [0, 165, 120], "isthing": 1, "id": 70, "name": "toilet"}, - {"color": [183, 130, 88], "isthing": 1, "id": 72, "name": "tv"}, - {"color": [95, 32, 0], "isthing": 1, "id": 73, "name": "laptop"}, - {"color": [130, 114, 135], "isthing": 1, "id": 74, "name": "mouse"}, - {"color": [110, 129, 133], "isthing": 1, "id": 75, "name": "remote"}, - {"color": [79, 210, 114], "isthing": 1, "id": 78, "name": "microwave"}, - {"color": [178, 90, 62], "isthing": 1, "id": 79, "name": "oven"}, - {"color": [65, 70, 15], "isthing": 1, "id": 80, "name": "toaster"}, - {"color": [59, 105, 106], "isthing": 1, "id": 82, "name": "refrigerator"}, - {"color": [142, 108, 45], "isthing": 1, "id": 84, "name": "book"}, - {"color": [196, 172, 0], "isthing": 1, "id": 85, "name": "clock"}, - {"color": [95, 54, 80], "isthing": 1, "id": 86, "name": "vase"}, - {"color": [191, 162, 208], "isthing": 1, "id": 90, "name": "toothbrush"}, - ], - 'all': [ - {"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"}, - {"color": [119, 11, 32], "isthing": 1, "id": 2, "name": "bicycle"}, - {"color": [0, 0, 142], "isthing": 1, "id": 3, "name": "car"}, - {"color": [0, 0, 230], "isthing": 1, "id": 4, "name": "motorcycle"}, - {"color": [106, 0, 228], "isthing": 1, "id": 5, "name": "airplane"}, - {"color": [0, 60, 100], "isthing": 1, "id": 6, "name": "bus"}, - {"color": [0, 80, 100], "isthing": 1, "id": 7, "name": "train"}, - {"color": [0, 0, 70], "isthing": 1, "id": 8, "name": "truck"}, - {"color": [0, 0, 192], "isthing": 1, "id": 9, "name": "boat"}, - {"color": [250, 0, 30], "isthing": 1, "id": 15, "name": "bench"}, - {"color": [165, 42, 42], "isthing": 1, "id": 16, "name": "bird"}, - {"color": [255, 77, 255], "isthing": 1, "id": 17, "name": "cat"}, - {"color": [0, 226, 252], "isthing": 1, "id": 18, "name": "dog"}, - {"color": [182, 182, 255], "isthing": 1, "id": 19, "name": "horse"}, - {"color": [0, 82, 0], "isthing": 1, "id": 20, "name": "sheep"}, - {"color": [120, 166, 157], "isthing": 1, "id": 21, "name": "cow"}, - {"color": [110, 76, 0], "isthing": 1, "id": 22, "name": "elephant"}, - {"color": [174, 57, 255], "isthing": 1, "id": 23, "name": "bear"}, - {"color": [199, 100, 0], "isthing": 1, "id": 24, "name": "zebra"}, - {"color": [72, 0, 118], "isthing": 1, "id": 25, "name": "giraffe"}, - {"color": [255, 179, 240], "isthing": 1, "id": 27, "name": "backpack"}, - {"color": [0, 125, 92], "isthing": 1, "id": 28, "name": "umbrella"}, - {"color": [209, 0, 151], "isthing": 1, "id": 31, "name": "handbag"}, - {"color": [188, 208, 182], "isthing": 1, "id": 32, "name": "tie"}, - {"color": [0, 220, 176], "isthing": 1, "id": 33, "name": "suitcase"}, - {"color": [255, 99, 164], "isthing": 1, "id": 34, "name": "frisbee"}, - {"color": [92, 0, 73], "isthing": 1, "id": 35, "name": "skis"}, - {"color": [133, 129, 255], "isthing": 1, "id": 36, "name": "snowboard"}, - {"color": [0, 228, 0], 
"isthing": 1, "id": 38, "name": "kite"}, - {"color": [134, 134, 103], "isthing": 1, "id": 41, "name": "skateboard"}, - {"color": [145, 148, 174], "isthing": 1, "id": 42, "name": "surfboard"}, - {"color": [197, 226, 255], "isthing": 1, "id": 44, "name": "bottle"}, - {"color": [109, 63, 54], "isthing": 1, "id": 47, "name": "cup"}, - {"color": [207, 138, 255], "isthing": 1, "id": 48, "name": "fork"}, - {"color": [151, 0, 95], "isthing": 1, "id": 49, "name": "knife"}, - {"color": [9, 80, 61], "isthing": 1, "id": 50, "name": "spoon"}, - {"color": [84, 105, 51], "isthing": 1, "id": 51, "name": "bowl"}, - {"color": [74, 65, 105], "isthing": 1, "id": 52, "name": "banana"}, - {"color": [166, 196, 102], "isthing": 1, "id": 53, "name": "apple"}, - {"color": [208, 195, 210], "isthing": 1, "id": 54, "name": "sandwich"}, - {"color": [255, 109, 65], "isthing": 1, "id": 55, "name": "orange"}, - {"color": [0, 143, 149], "isthing": 1, "id": 56, "name": "broccoli"}, - {"color": [179, 0, 194], "isthing": 1, "id": 57, "name": "carrot"}, - {"color": [5, 121, 0], "isthing": 1, "id": 59, "name": "pizza"}, - {"color": [227, 255, 205], "isthing": 1, "id": 60, "name": "donut"}, - {"color": [147, 186, 208], "isthing": 1, "id": 61, "name": "cake"}, - {"color": [153, 69, 1], "isthing": 1, "id": 62, "name": "chair"}, - {"color": [3, 95, 161], "isthing": 1, "id": 63, "name": "couch"}, - {"color": [119, 0, 170], "isthing": 1, "id": 65, "name": "bed"}, - {"color": [0, 165, 120], "isthing": 1, "id": 70, "name": "toilet"}, - {"color": [183, 130, 88], "isthing": 1, "id": 72, "name": "tv"}, - {"color": [95, 32, 0], "isthing": 1, "id": 73, "name": "laptop"}, - {"color": [130, 114, 135], "isthing": 1, "id": 74, "name": "mouse"}, - {"color": [110, 129, 133], "isthing": 1, "id": 75, "name": "remote"}, - {"color": [166, 74, 118], "isthing": 1, "id": 76, "name": "keyboard"}, - {"color": [79, 210, 114], "isthing": 1, "id": 78, "name": "microwave"}, - {"color": [178, 90, 62], "isthing": 1, "id": 79, "name": "oven"}, - {"color": [65, 70, 15], "isthing": 1, "id": 80, "name": "toaster"}, - {"color": [127, 167, 115], "isthing": 1, "id": 81, "name": "sink"}, - {"color": [59, 105, 106], "isthing": 1, "id": 82, "name": "refrigerator"}, - {"color": [142, 108, 45], "isthing": 1, "id": 84, "name": "book"}, - {"color": [196, 172, 0], "isthing": 1, "id": 85, "name": "clock"}, - {"color": [95, 54, 80], "isthing": 1, "id": 86, "name": "vase"}, - {"color": [128, 76, 255], "isthing": 1, "id": 87, "name": "scissors"}, - {"color": [191, 162, 208], "isthing": 1, "id": 90, "name": "toothbrush"}, - ], - } - -# Classes not used in COCO_OVD_CATEGORIES -NOT_USED = [ - {"color": [250, 170, 30], "isthing": 1, "id": 10, "name": "traffic light"}, - {"color": [100, 170, 30], "isthing": 1, "id": 11, "name": "fire hydrant"}, - {"color": [220, 220, 0], "isthing": 1, "id": 13, "name": "stop sign"}, - {"color": [175, 116, 175], "isthing": 1, "id": 14, "name": "parking meter"}, - {"color": [78, 180, 255], "isthing": 1, "id": 37, "name": "sports ball"}, - {"color": [174, 255, 243], "isthing": 1, "id": 39, "name": "baseball bat"}, - {"color": [45, 89, 255], "isthing": 1, "id": 40, "name": "baseball glove"}, - {"color": [255, 208, 186], "isthing": 1, "id": 43, "name": "tennis racket"}, - {"color": [171, 134, 1], "isthing": 1, "id": 46, "name": "wine glass"}, - {"color": [209, 99, 106], "isthing": 1, "id": 58, "name": "hot dog"}, - {"color": [163, 255, 0], "isthing": 1, "id": 64, "name": "potted plant"}, - {"color": [0, 182, 199], "isthing": 1, "id": 67, "name": "dining 
table"}, - {"color": [219, 142, 185], "isthing": 1, "id": 77, "name": "cell phone"}, - {"color": [201, 57, 1], "isthing": 1, "id": 88, "name": "teddy bear"}, - {"color": [246, 0, 122], "isthing": 1, "id": 89, "name": "hair drier"}, - {"color": [255, 255, 128], "isthing": 0, "id": 92, "name": "banner"}, - {"color": [147, 211, 203], "isthing": 0, "id": 93, "name": "blanket"}, - {"color": [150, 100, 100], "isthing": 0, "id": 95, "name": "bridge"}, - {"color": [168, 171, 172], "isthing": 0, "id": 100, "name": "cardboard"}, - {"color": [146, 112, 198], "isthing": 0, "id": 107, "name": "counter"}, - {"color": [210, 170, 100], "isthing": 0, "id": 109, "name": "curtain"}, - {"color": [92, 136, 89], "isthing": 0, "id": 112, "name": "door-stuff"}, - {"color": [218, 88, 184], "isthing": 0, "id": 118, "name": "floor-wood"}, - {"color": [241, 129, 0], "isthing": 0, "id": 119, "name": "flower"}, - {"color": [217, 17, 255], "isthing": 0, "id": 122, "name": "fruit"}, - {"color": [124, 74, 181], "isthing": 0, "id": 125, "name": "gravel"}, - {"color": [70, 70, 70], "isthing": 0, "id": 128, "name": "house"}, - {"color": [255, 228, 255], "isthing": 0, "id": 130, "name": "light"}, - {"color": [154, 208, 0], "isthing": 0, "id": 133, "name": "mirror-stuff"}, - {"color": [193, 0, 92], "isthing": 0, "id": 138, "name": "net"}, - {"color": [76, 91, 113], "isthing": 0, "id": 141, "name": "pillow"}, - {"color": [255, 180, 195], "isthing": 0, "id": 144, "name": "platform"}, - {"color": [106, 154, 176], "isthing": 0, "id": 145, "name": "playingfield"}, - {"color": [230, 150, 140], "isthing": 0, "id": 147, "name": "railroad"}, - {"color": [60, 143, 255], "isthing": 0, "id": 148, "name": "river"}, - {"color": [128, 64, 128], "isthing": 0, "id": 149, "name": "road"}, - {"color": [92, 82, 55], "isthing": 0, "id": 151, "name": "roof"}, - {"color": [254, 212, 124], "isthing": 0, "id": 154, "name": "sand"}, - {"color": [73, 77, 174], "isthing": 0, "id": 155, "name": "sea"}, - {"color": [255, 160, 98], "isthing": 0, "id": 156, "name": "shelf"}, - {"color": [255, 255, 255], "isthing": 0, "id": 159, "name": "snow"}, - {"color": [104, 84, 109], "isthing": 0, "id": 161, "name": "stairs"}, - {"color": [169, 164, 131], "isthing": 0, "id": 166, "name": "tent"}, - {"color": [225, 199, 255], "isthing": 0, "id": 168, "name": "towel"}, - {"color": [135, 158, 223], "isthing": 0, "id": 175, "name": "wall-stone"}, - {"color": [137, 54, 74], "isthing": 0, "id": 171, "name": "wall-brick"}, - {"color": [7, 246, 231], "isthing": 0, "id": 176, "name": "wall-tile"}, - {"color": [107, 255, 200], "isthing": 0, "id": 177, "name": "wall-wood"}, - {"color": [58, 41, 149], "isthing": 0, "id": 178, "name": "water-other"}, - {"color": [183, 121, 142], "isthing": 0, "id": 180, "name": "window-blind"}, - {"color": [255, 73, 97], "isthing": 0, "id": 181, "name": "window-other"}, - {"color": [107, 142, 35], "isthing": 0, "id": 184, "name": "tree-merged"}, - {"color": [190, 153, 153], "isthing": 0, "id": 185, "name": "fence-merged"}, - {"color": [146, 139, 141], "isthing": 0, "id": 186, "name": "ceiling-merged"}, - {"color": [70, 130, 180], "isthing": 0, "id": 187, "name": "sky-other-merged"}, - {"color": [134, 199, 156], "isthing": 0, "id": 188, "name": "cabinet-merged"}, - {"color": [209, 226, 140], "isthing": 0, "id": 189, "name": "table-merged"}, - {"color": [96, 36, 108], "isthing": 0, "id": 190, "name": "floor-other-merged"}, - {"color": [96, 96, 96], "isthing": 0, "id": 191, "name": "pavement-merged"}, - {"color": [64, 170, 64], "isthing": 0, "id": 192, 
"name": "mountain-merged"}, - {"color": [152, 251, 152], "isthing": 0, "id": 193, "name": "grass-merged"}, - {"color": [208, 229, 228], "isthing": 0, "id": 194, "name": "dirt-merged"}, - {"color": [206, 186, 171], "isthing": 0, "id": 195, "name": "paper-merged"}, - {"color": [152, 161, 64], "isthing": 0, "id": 196, "name": "food-other-merged"}, - {"color": [116, 112, 0], "isthing": 0, "id": 197, "name": "building-other-merged"}, - {"color": [0, 114, 143], "isthing": 0, "id": 198, "name": "rock-merged"}, - {"color": [102, 102, 156], "isthing": 0, "id": 199, "name": "wall-other-merged"}, - {"color": [250, 141, 255], "isthing": 0, "id": 200, "name": "rug-merged"}, - ] - -# All coco categories, together with their nice-looking visualization colors -# It's from https://github.com/cocodataset/panopticapi/blob/master/panoptic_coco_categories.json -COCO_CATEGORIES = [ - {"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"}, - {"color": [119, 11, 32], "isthing": 1, "id": 2, "name": "bicycle"}, - {"color": [0, 0, 142], "isthing": 1, "id": 3, "name": "car"}, - {"color": [0, 0, 230], "isthing": 1, "id": 4, "name": "motorcycle"}, - {"color": [106, 0, 228], "isthing": 1, "id": 5, "name": "airplane"}, - {"color": [0, 60, 100], "isthing": 1, "id": 6, "name": "bus"}, - {"color": [0, 80, 100], "isthing": 1, "id": 7, "name": "train"}, - {"color": [0, 0, 70], "isthing": 1, "id": 8, "name": "truck"}, - {"color": [0, 0, 192], "isthing": 1, "id": 9, "name": "boat"}, - {"color": [250, 170, 30], "isthing": 1, "id": 10, "name": "traffic light"}, - {"color": [100, 170, 30], "isthing": 1, "id": 11, "name": "fire hydrant"}, - {"color": [220, 220, 0], "isthing": 1, "id": 13, "name": "stop sign"}, - {"color": [175, 116, 175], "isthing": 1, "id": 14, "name": "parking meter"}, - {"color": [250, 0, 30], "isthing": 1, "id": 15, "name": "bench"}, - {"color": [165, 42, 42], "isthing": 1, "id": 16, "name": "bird"}, - {"color": [255, 77, 255], "isthing": 1, "id": 17, "name": "cat"}, - {"color": [0, 226, 252], "isthing": 1, "id": 18, "name": "dog"}, - {"color": [182, 182, 255], "isthing": 1, "id": 19, "name": "horse"}, - {"color": [0, 82, 0], "isthing": 1, "id": 20, "name": "sheep"}, - {"color": [120, 166, 157], "isthing": 1, "id": 21, "name": "cow"}, - {"color": [110, 76, 0], "isthing": 1, "id": 22, "name": "elephant"}, - {"color": [174, 57, 255], "isthing": 1, "id": 23, "name": "bear"}, - {"color": [199, 100, 0], "isthing": 1, "id": 24, "name": "zebra"}, - {"color": [72, 0, 118], "isthing": 1, "id": 25, "name": "giraffe"}, - {"color": [255, 179, 240], "isthing": 1, "id": 27, "name": "backpack"}, - {"color": [0, 125, 92], "isthing": 1, "id": 28, "name": "umbrella"}, - {"color": [209, 0, 151], "isthing": 1, "id": 31, "name": "handbag"}, - {"color": [188, 208, 182], "isthing": 1, "id": 32, "name": "tie"}, - {"color": [0, 220, 176], "isthing": 1, "id": 33, "name": "suitcase"}, - {"color": [255, 99, 164], "isthing": 1, "id": 34, "name": "frisbee"}, - {"color": [92, 0, 73], "isthing": 1, "id": 35, "name": "skis"}, - {"color": [133, 129, 255], "isthing": 1, "id": 36, "name": "snowboard"}, - {"color": [78, 180, 255], "isthing": 1, "id": 37, "name": "sports ball"}, - {"color": [0, 228, 0], "isthing": 1, "id": 38, "name": "kite"}, - {"color": [174, 255, 243], "isthing": 1, "id": 39, "name": "baseball bat"}, - {"color": [45, 89, 255], "isthing": 1, "id": 40, "name": "baseball glove"}, - {"color": [134, 134, 103], "isthing": 1, "id": 41, "name": "skateboard"}, - {"color": [145, 148, 174], "isthing": 1, "id": 42, "name": 
"surfboard"}, - {"color": [255, 208, 186], "isthing": 1, "id": 43, "name": "tennis racket"}, - {"color": [197, 226, 255], "isthing": 1, "id": 44, "name": "bottle"}, - {"color": [171, 134, 1], "isthing": 1, "id": 46, "name": "wine glass"}, - {"color": [109, 63, 54], "isthing": 1, "id": 47, "name": "cup"}, - {"color": [207, 138, 255], "isthing": 1, "id": 48, "name": "fork"}, - {"color": [151, 0, 95], "isthing": 1, "id": 49, "name": "knife"}, - {"color": [9, 80, 61], "isthing": 1, "id": 50, "name": "spoon"}, - {"color": [84, 105, 51], "isthing": 1, "id": 51, "name": "bowl"}, - {"color": [74, 65, 105], "isthing": 1, "id": 52, "name": "banana"}, - {"color": [166, 196, 102], "isthing": 1, "id": 53, "name": "apple"}, - {"color": [208, 195, 210], "isthing": 1, "id": 54, "name": "sandwich"}, - {"color": [255, 109, 65], "isthing": 1, "id": 55, "name": "orange"}, - {"color": [0, 143, 149], "isthing": 1, "id": 56, "name": "broccoli"}, - {"color": [179, 0, 194], "isthing": 1, "id": 57, "name": "carrot"}, - {"color": [209, 99, 106], "isthing": 1, "id": 58, "name": "hot dog"}, - {"color": [5, 121, 0], "isthing": 1, "id": 59, "name": "pizza"}, - {"color": [227, 255, 205], "isthing": 1, "id": 60, "name": "donut"}, - {"color": [147, 186, 208], "isthing": 1, "id": 61, "name": "cake"}, - {"color": [153, 69, 1], "isthing": 1, "id": 62, "name": "chair"}, - {"color": [3, 95, 161], "isthing": 1, "id": 63, "name": "couch"}, - {"color": [163, 255, 0], "isthing": 1, "id": 64, "name": "potted plant"}, - {"color": [119, 0, 170], "isthing": 1, "id": 65, "name": "bed"}, - {"color": [0, 182, 199], "isthing": 1, "id": 67, "name": "dining table"}, - {"color": [0, 165, 120], "isthing": 1, "id": 70, "name": "toilet"}, - {"color": [183, 130, 88], "isthing": 1, "id": 72, "name": "tv"}, - {"color": [95, 32, 0], "isthing": 1, "id": 73, "name": "laptop"}, - {"color": [130, 114, 135], "isthing": 1, "id": 74, "name": "mouse"}, - {"color": [110, 129, 133], "isthing": 1, "id": 75, "name": "remote"}, - {"color": [166, 74, 118], "isthing": 1, "id": 76, "name": "keyboard"}, - {"color": [219, 142, 185], "isthing": 1, "id": 77, "name": "cell phone"}, - {"color": [79, 210, 114], "isthing": 1, "id": 78, "name": "microwave"}, - {"color": [178, 90, 62], "isthing": 1, "id": 79, "name": "oven"}, - {"color": [65, 70, 15], "isthing": 1, "id": 80, "name": "toaster"}, - {"color": [127, 167, 115], "isthing": 1, "id": 81, "name": "sink"}, - {"color": [59, 105, 106], "isthing": 1, "id": 82, "name": "refrigerator"}, - {"color": [142, 108, 45], "isthing": 1, "id": 84, "name": "book"}, - {"color": [196, 172, 0], "isthing": 1, "id": 85, "name": "clock"}, - {"color": [95, 54, 80], "isthing": 1, "id": 86, "name": "vase"}, - {"color": [128, 76, 255], "isthing": 1, "id": 87, "name": "scissors"}, - {"color": [201, 57, 1], "isthing": 1, "id": 88, "name": "teddy bear"}, - {"color": [246, 0, 122], "isthing": 1, "id": 89, "name": "hair drier"}, - {"color": [191, 162, 208], "isthing": 1, "id": 90, "name": "toothbrush"}, - {"color": [255, 255, 128], "isthing": 0, "id": 92, "name": "banner"}, - {"color": [147, 211, 203], "isthing": 0, "id": 93, "name": "blanket"}, - {"color": [150, 100, 100], "isthing": 0, "id": 95, "name": "bridge"}, - {"color": [168, 171, 172], "isthing": 0, "id": 100, "name": "cardboard"}, - {"color": [146, 112, 198], "isthing": 0, "id": 107, "name": "counter"}, - {"color": [210, 170, 100], "isthing": 0, "id": 109, "name": "curtain"}, - {"color": [92, 136, 89], "isthing": 0, "id": 112, "name": "door-stuff"}, - {"color": [218, 88, 184], "isthing": 
0, "id": 118, "name": "floor-wood"}, - {"color": [241, 129, 0], "isthing": 0, "id": 119, "name": "flower"}, - {"color": [217, 17, 255], "isthing": 0, "id": 122, "name": "fruit"}, - {"color": [124, 74, 181], "isthing": 0, "id": 125, "name": "gravel"}, - {"color": [70, 70, 70], "isthing": 0, "id": 128, "name": "house"}, - {"color": [255, 228, 255], "isthing": 0, "id": 130, "name": "light"}, - {"color": [154, 208, 0], "isthing": 0, "id": 133, "name": "mirror-stuff"}, - {"color": [193, 0, 92], "isthing": 0, "id": 138, "name": "net"}, - {"color": [76, 91, 113], "isthing": 0, "id": 141, "name": "pillow"}, - {"color": [255, 180, 195], "isthing": 0, "id": 144, "name": "platform"}, - {"color": [106, 154, 176], "isthing": 0, "id": 145, "name": "playingfield"}, - {"color": [230, 150, 140], "isthing": 0, "id": 147, "name": "railroad"}, - {"color": [60, 143, 255], "isthing": 0, "id": 148, "name": "river"}, - {"color": [128, 64, 128], "isthing": 0, "id": 149, "name": "road"}, - {"color": [92, 82, 55], "isthing": 0, "id": 151, "name": "roof"}, - {"color": [254, 212, 124], "isthing": 0, "id": 154, "name": "sand"}, - {"color": [73, 77, 174], "isthing": 0, "id": 155, "name": "sea"}, - {"color": [255, 160, 98], "isthing": 0, "id": 156, "name": "shelf"}, - {"color": [255, 255, 255], "isthing": 0, "id": 159, "name": "snow"}, - {"color": [104, 84, 109], "isthing": 0, "id": 161, "name": "stairs"}, - {"color": [169, 164, 131], "isthing": 0, "id": 166, "name": "tent"}, - {"color": [225, 199, 255], "isthing": 0, "id": 168, "name": "towel"}, - {"color": [137, 54, 74], "isthing": 0, "id": 171, "name": "wall-brick"}, - {"color": [135, 158, 223], "isthing": 0, "id": 175, "name": "wall-stone"}, - {"color": [7, 246, 231], "isthing": 0, "id": 176, "name": "wall-tile"}, - {"color": [107, 255, 200], "isthing": 0, "id": 177, "name": "wall-wood"}, - {"color": [58, 41, 149], "isthing": 0, "id": 178, "name": "water-other"}, - {"color": [183, 121, 142], "isthing": 0, "id": 180, "name": "window-blind"}, - {"color": [255, 73, 97], "isthing": 0, "id": 181, "name": "window-other"}, - {"color": [107, 142, 35], "isthing": 0, "id": 184, "name": "tree-merged"}, - {"color": [190, 153, 153], "isthing": 0, "id": 185, "name": "fence-merged"}, - {"color": [146, 139, 141], "isthing": 0, "id": 186, "name": "ceiling-merged"}, - {"color": [70, 130, 180], "isthing": 0, "id": 187, "name": "sky-other-merged"}, - {"color": [134, 199, 156], "isthing": 0, "id": 188, "name": "cabinet-merged"}, - {"color": [209, 226, 140], "isthing": 0, "id": 189, "name": "table-merged"}, - {"color": [96, 36, 108], "isthing": 0, "id": 190, "name": "floor-other-merged"}, - {"color": [96, 96, 96], "isthing": 0, "id": 191, "name": "pavement-merged"}, - {"color": [64, 170, 64], "isthing": 0, "id": 192, "name": "mountain-merged"}, - {"color": [152, 251, 152], "isthing": 0, "id": 193, "name": "grass-merged"}, - {"color": [208, 229, 228], "isthing": 0, "id": 194, "name": "dirt-merged"}, - {"color": [206, 186, 171], "isthing": 0, "id": 195, "name": "paper-merged"}, - {"color": [152, 161, 64], "isthing": 0, "id": 196, "name": "food-other-merged"}, - {"color": [116, 112, 0], "isthing": 0, "id": 197, "name": "building-other-merged"}, - {"color": [0, 114, 143], "isthing": 0, "id": 198, "name": "rock-merged"}, - {"color": [102, 102, 156], "isthing": 0, "id": 199, "name": "wall-other-merged"}, - {"color": [250, 141, 255], "isthing": 0, "id": 200, "name": "rug-merged"}, -] - -# fmt: off -COCO_PERSON_KEYPOINT_NAMES = ( - "nose", - "left_eye", "right_eye", - "left_ear", "right_ear", - 
"left_shoulder", "right_shoulder", - "left_elbow", "right_elbow", - "left_wrist", "right_wrist", - "left_hip", "right_hip", - "left_knee", "right_knee", - "left_ankle", "right_ankle", -) -# fmt: on - -# Pairs of keypoints that should be exchanged under horizontal flipping -COCO_PERSON_KEYPOINT_FLIP_MAP = ( - ("left_eye", "right_eye"), - ("left_ear", "right_ear"), - ("left_shoulder", "right_shoulder"), - ("left_elbow", "right_elbow"), - ("left_wrist", "right_wrist"), - ("left_hip", "right_hip"), - ("left_knee", "right_knee"), - ("left_ankle", "right_ankle"), -) - -# rules for pairs of keypoints to draw a line between, and the line color to use. -KEYPOINT_CONNECTION_RULES = [ - # face - ("left_ear", "left_eye", (102, 204, 255)), - ("right_ear", "right_eye", (51, 153, 255)), - ("left_eye", "nose", (102, 0, 204)), - ("nose", "right_eye", (51, 102, 255)), - # upper-body - ("left_shoulder", "right_shoulder", (255, 128, 0)), - ("left_shoulder", "left_elbow", (153, 255, 204)), - ("right_shoulder", "right_elbow", (128, 229, 255)), - ("left_elbow", "left_wrist", (153, 255, 153)), - ("right_elbow", "right_wrist", (102, 255, 224)), - # lower-body - ("left_hip", "right_hip", (255, 102, 0)), - ("left_hip", "left_knee", (255, 255, 77)), - ("right_hip", "right_knee", (153, 255, 204)), - ("left_knee", "left_ankle", (191, 255, 128)), - ("right_knee", "right_ankle", (255, 195, 77)), -] - -# All Cityscapes categories, together with their nice-looking visualization colors -# It's from https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/helpers/labels.py # noqa -CITYSCAPES_CATEGORIES = [ - {"color": (128, 64, 128), "isthing": 0, "id": 7, "trainId": 0, "name": "road"}, - {"color": (244, 35, 232), "isthing": 0, "id": 8, "trainId": 1, "name": "sidewalk"}, - {"color": (70, 70, 70), "isthing": 0, "id": 11, "trainId": 2, "name": "building"}, - {"color": (102, 102, 156), "isthing": 0, "id": 12, "trainId": 3, "name": "wall"}, - {"color": (190, 153, 153), "isthing": 0, "id": 13, "trainId": 4, "name": "fence"}, - {"color": (153, 153, 153), "isthing": 0, "id": 17, "trainId": 5, "name": "pole"}, - {"color": (250, 170, 30), "isthing": 0, "id": 19, "trainId": 6, "name": "traffic light"}, - {"color": (220, 220, 0), "isthing": 0, "id": 20, "trainId": 7, "name": "traffic sign"}, - {"color": (107, 142, 35), "isthing": 0, "id": 21, "trainId": 8, "name": "vegetation"}, - {"color": (152, 251, 152), "isthing": 0, "id": 22, "trainId": 9, "name": "terrain"}, - {"color": (70, 130, 180), "isthing": 0, "id": 23, "trainId": 10, "name": "sky"}, - {"color": (220, 20, 60), "isthing": 1, "id": 24, "trainId": 11, "name": "person"}, - {"color": (255, 0, 0), "isthing": 1, "id": 25, "trainId": 12, "name": "rider"}, - {"color": (0, 0, 142), "isthing": 1, "id": 26, "trainId": 13, "name": "car"}, - {"color": (0, 0, 70), "isthing": 1, "id": 27, "trainId": 14, "name": "truck"}, - {"color": (0, 60, 100), "isthing": 1, "id": 28, "trainId": 15, "name": "bus"}, - {"color": (0, 80, 100), "isthing": 1, "id": 31, "trainId": 16, "name": "train"}, - {"color": (0, 0, 230), "isthing": 1, "id": 32, "trainId": 17, "name": "motorcycle"}, - {"color": (119, 11, 32), "isthing": 1, "id": 33, "trainId": 18, "name": "bicycle"}, -] - -# fmt: off -ADE20K_SEM_SEG_CATEGORIES = [ - "wall", "building", "sky", "floor", "tree", "ceiling", "road, route", "bed", "window ", "grass", "cabinet", "sidewalk, pavement", "person", "earth, ground", "door", "table", "mountain, mount", "plant", "curtain", "chair", "car", "water", "painting, picture", "sofa", "shelf", 
"house", "sea", "mirror", "rug", "field", "armchair", "seat", "fence", "desk", "rock, stone", "wardrobe, closet, press", "lamp", "tub", "rail", "cushion", "base, pedestal, stand", "box", "column, pillar", "signboard, sign", "chest of drawers, chest, bureau, dresser", "counter", "sand", "sink", "skyscraper", "fireplace", "refrigerator, icebox", "grandstand, covered stand", "path", "stairs", "runway", "case, display case, showcase, vitrine", "pool table, billiard table, snooker table", "pillow", "screen door, screen", "stairway, staircase", "river", "bridge, span", "bookcase", "blind, screen", "coffee table", "toilet, can, commode, crapper, pot, potty, stool, throne", "flower", "book", "hill", "bench", "countertop", "stove", "palm, palm tree", "kitchen island", "computer", "swivel chair", "boat", "bar", "arcade machine", "hovel, hut, hutch, shack, shanty", "bus", "towel", "light", "truck", "tower", "chandelier", "awning, sunshade, sunblind", "street lamp", "booth", "tv", "plane", "dirt track", "clothes", "pole", "land, ground, soil", "bannister, banister, balustrade, balusters, handrail", "escalator, moving staircase, moving stairway", "ottoman, pouf, pouffe, puff, hassock", "bottle", "buffet, counter, sideboard", "poster, posting, placard, notice, bill, card", "stage", "van", "ship", "fountain", "conveyer belt, conveyor belt, conveyer, conveyor, transporter", "canopy", "washer, automatic washer, washing machine", "plaything, toy", "pool", "stool", "barrel, cask", "basket, handbasket", "falls", "tent", "bag", "minibike, motorbike", "cradle", "oven", "ball", "food, solid food", "step, stair", "tank, storage tank", "trade name", "microwave", "pot", "animal", "bicycle", "lake", "dishwasher", "screen", "blanket, cover", "sculpture", "hood, exhaust hood", "sconce", "vase", "traffic light", "tray", "trash can", "fan", "pier", "crt screen", "plate", "monitor", "bulletin board", "shower", "radiator", "glass, drinking glass", "clock", "flag", # noqa -] -# After processed by `prepare_ade20k_sem_seg.py`, id 255 means ignore -# fmt: on - - -def _get_coco_instances_meta(): - thing_ids = [k["id"] for k in COCO_CATEGORIES if k["isthing"] == 1] - thing_colors = [k["color"] for k in COCO_CATEGORIES if k["isthing"] == 1] - assert len(thing_ids) == 80, len(thing_ids) - # Mapping from the incontiguous COCO category id to an id in [0, 79] - thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)} - thing_classes = [k["name"] for k in COCO_CATEGORIES if k["isthing"] == 1] - ret = { - "thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id, - "thing_classes": thing_classes, - "thing_colors": thing_colors, - } - return ret - - -def _get_coco_panoptic_separated_meta(): - """ - Returns metadata for "separated" version of the panoptic segmentation dataset. - """ - stuff_ids = [k["id"] for k in COCO_CATEGORIES if k["isthing"] == 0] - assert len(stuff_ids) == 53, len(stuff_ids) - - # For semantic segmentation, this mapping maps from contiguous stuff id - # (in [0, 53], used in models) to ids in the dataset (used for processing results) - # The id 0 is mapped to an extra category "thing". 
- stuff_dataset_id_to_contiguous_id = {k: i + 1 for i, k in enumerate(stuff_ids)} - # When converting COCO panoptic annotations to semantic annotations - # We label the "thing" category to 0 - stuff_dataset_id_to_contiguous_id[0] = 0 - - # 54 names for COCO stuff categories (including "things") - stuff_classes = ["things"] + [ - k["name"].replace("-other", "").replace("-merged", "") - for k in COCO_CATEGORIES - if k["isthing"] == 0 - ] - - # NOTE: I randomly picked a color for things - stuff_colors = [[82, 18, 128]] + [k["color"] for k in COCO_CATEGORIES if k["isthing"] == 0] - ret = { - "stuff_dataset_id_to_contiguous_id": stuff_dataset_id_to_contiguous_id, - "stuff_classes": stuff_classes, - "stuff_colors": stuff_colors, - } - ret.update(_get_coco_instances_meta()) - return ret - - -def _get_builtin_metadata(dataset_name): - if dataset_name == "coco": - return _get_coco_instances_meta() - if dataset_name == "coco_panoptic_separated": - return _get_coco_panoptic_separated_meta() - elif dataset_name == "coco_panoptic_standard": - meta = {} - # The following metadata maps contiguous id from [0, #thing categories + - # #stuff categories) to their names and colors. We have to replica of the - # same name and color under "thing_*" and "stuff_*" because the current - # visualization function in D2 handles thing and class classes differently - # due to some heuristic used in Panoptic FPN. We keep the same naming to - # enable reusing existing visualization functions. - thing_classes = [k["name"] for k in COCO_CATEGORIES] - thing_colors = [k["color"] for k in COCO_CATEGORIES] - stuff_classes = [k["name"] for k in COCO_CATEGORIES] - stuff_colors = [k["color"] for k in COCO_CATEGORIES] - - meta["thing_classes"] = thing_classes - meta["thing_colors"] = thing_colors - meta["stuff_classes"] = stuff_classes - meta["stuff_colors"] = stuff_colors - - # Convert category id for training: - # category id: like semantic segmentation, it is the class id for each - # pixel. Since there are some classes not used in evaluation, the category - # id is not always contiguous and thus we have two set of category ids: - # - original category id: category id in the original dataset, mainly - # used for evaluation. - # - contiguous category id: [0, #classes), in order to train the linear - # softmax classifier. 
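        # For a concrete illustration (derived from the COCO_CATEGORIES list above):
        #   "person" (dataset id 1, the first entry)        -> contiguous id 0
        #   "banner" (dataset id 92, the first stuff entry) -> contiguous id 80,
        #   since it follows the 80 thing classes in the list. By contrast, the
        #   "separated" metadata above reserves contiguous id 0 for the merged
        #   "things" class, so there "banner" maps to 1 instead.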
- thing_dataset_id_to_contiguous_id = {} - stuff_dataset_id_to_contiguous_id = {} - - for i, cat in enumerate(COCO_CATEGORIES): - if cat["isthing"]: - thing_dataset_id_to_contiguous_id[cat["id"]] = i - else: - stuff_dataset_id_to_contiguous_id[cat["id"]] = i - - meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id - meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id - - return meta - elif dataset_name == "coco_person": - return { - "thing_classes": ["person"], - "keypoint_names": COCO_PERSON_KEYPOINT_NAMES, - "keypoint_flip_map": COCO_PERSON_KEYPOINT_FLIP_MAP, - "keypoint_connection_rules": KEYPOINT_CONNECTION_RULES, - } - elif dataset_name == "cityscapes": - # fmt: off - CITYSCAPES_THING_CLASSES = [ - "person", "rider", "car", "truck", - "bus", "train", "motorcycle", "bicycle", - ] - CITYSCAPES_STUFF_CLASSES = [ - "road", "sidewalk", "building", "wall", "fence", "pole", "traffic light", - "traffic sign", "vegetation", "terrain", "sky", "person", "rider", "car", - "truck", "bus", "train", "motorcycle", "bicycle", - ] - # fmt: on - return { - "thing_classes": CITYSCAPES_THING_CLASSES, - "stuff_classes": CITYSCAPES_STUFF_CLASSES, - } - raise KeyError("No built-in metadata for dataset {}".format(dataset_name)) diff --git a/spaces/CVPR/regionclip-demo/detectron2/utils/testing.py b/spaces/CVPR/regionclip-demo/detectron2/utils/testing.py deleted file mode 100644 index a2e94076e79625d4853f86164e690d1720455e0f..0000000000000000000000000000000000000000 --- a/spaces/CVPR/regionclip-demo/detectron2/utils/testing.py +++ /dev/null @@ -1,132 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import io -import numpy as np -import torch - -from detectron2 import model_zoo -from detectron2.data import DatasetCatalog -from detectron2.data.detection_utils import read_image -from detectron2.modeling import build_model -from detectron2.structures import Boxes, Instances, ROIMasks -from detectron2.utils.file_io import PathManager - - -""" -Internal utilities for tests. Don't use except for writing tests. -""" - - -def get_model_no_weights(config_path): - """ - Like model_zoo.get, but do not load any weights (even pretrained) - """ - cfg = model_zoo.get_config(config_path) - if not torch.cuda.is_available(): - cfg.MODEL.DEVICE = "cpu" - return build_model(cfg) - - -def random_boxes(num_boxes, max_coord=100, device="cpu"): - """ - Create a random Nx4 boxes tensor, with coordinates < max_coord. - """ - boxes = torch.rand(num_boxes, 4, device=device) * (max_coord * 0.5) - boxes.clamp_(min=1.0) # tiny boxes cause numerical instability in box regression - # Note: the implementation of this function in torchvision is: - # boxes[:, 2:] += torch.rand(N, 2) * 100 - # but it does not guarantee non-negative widths/heights constraints: - # boxes[:, 2] >= boxes[:, 0] and boxes[:, 3] >= boxes[:, 1]: - boxes[:, 2:] += boxes[:, :2] - return boxes - - -def get_sample_coco_image(tensor=True): - """ - Args: - tensor (bool): if True, returns 3xHxW tensor. - else, returns a HxWx3 numpy array. - - Returns: - an image, in BGR color. 
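Stepping back to `_get_builtin_metadata` from the metadata module above, here is a minimal sketch of how such metadata is typically consumed when registering a dataset. The dataset name and image path are placeholders, and the `MetadataCatalog` call is assumed from detectron2's usual registration pattern rather than shown in this diff:

from detectron2.data import MetadataCatalog

# Attach the hard-coded class names/colors so a pre-trained COCO model can be
# visualized even when the original COCO annotation json is unavailable.
meta = _get_builtin_metadata("coco")
MetadataCatalog.get("coco_2017_val_like").set(
    image_root="datasets/coco/val2017",  # placeholder path
    evaluator_type="coco",
    **meta,
)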
- """ - try: - file_name = DatasetCatalog.get("coco_2017_val_100")[0]["file_name"] - if not PathManager.exists(file_name): - raise FileNotFoundError() - except IOError: - # for public CI to run - file_name = "http://images.cocodataset.org/train2017/000000000009.jpg" - ret = read_image(file_name, format="BGR") - if tensor: - ret = torch.from_numpy(np.ascontiguousarray(ret.transpose(2, 0, 1))) - return ret - - -def convert_scripted_instances(instances): - """ - Convert a scripted Instances object to a regular :class:`Instances` object - """ - ret = Instances(instances.image_size) - for name in instances._field_names: - val = getattr(instances, "_" + name, None) - if val is not None: - ret.set(name, val) - return ret - - -def assert_instances_allclose(input, other, *, rtol=1e-5, msg="", size_as_tensor=False): - """ - Args: - input, other (Instances): - size_as_tensor: compare image_size of the Instances as tensors (instead of tuples). - Useful for comparing outputs of tracing. - """ - if not isinstance(input, Instances): - input = convert_scripted_instances(input) - if not isinstance(other, Instances): - other = convert_scripted_instances(other) - - if not msg: - msg = "Two Instances are different! " - else: - msg = msg.rstrip() + " " - - size_error_msg = msg + f"image_size is {input.image_size} vs. {other.image_size}!" - if size_as_tensor: - assert torch.equal( - torch.tensor(input.image_size), torch.tensor(other.image_size) - ), size_error_msg - else: - assert input.image_size == other.image_size, size_error_msg - fields = sorted(input.get_fields().keys()) - fields_other = sorted(other.get_fields().keys()) - assert fields == fields_other, msg + f"Fields are {fields} vs {fields_other}!" - - for f in fields: - val1, val2 = input.get(f), other.get(f) - if isinstance(val1, (Boxes, ROIMasks)): - # boxes in the range of O(100) and can have a larger tolerance - assert torch.allclose(val1.tensor, val2.tensor, atol=100 * rtol), ( - msg + f"Field {f} differs too much!" - ) - elif isinstance(val1, torch.Tensor): - if val1.dtype.is_floating_point: - mag = torch.abs(val1).max().cpu().item() - assert torch.allclose(val1, val2, atol=mag * rtol), ( - msg + f"Field {f} differs too much!" - ) - else: - assert torch.equal(val1, val2), msg + f"Field {f} is different!" - else: - raise ValueError(f"Don't know how to compare type {type(val1)}") - - -def reload_script_model(module): - """ - Save a jit module and load it back. 
- Similar to the `getExportImportCopy` function in torch/testing/ - """ - buffer = io.BytesIO() - torch.jit.save(module, buffer) - buffer.seek(0) - return torch.jit.load(buffer) diff --git a/spaces/ChallengeHub/Chinese-LangChain/tests/test_duckpy.py b/spaces/ChallengeHub/Chinese-LangChain/tests/test_duckpy.py deleted file mode 100644 index 78e9e28195a4699df1e012335b03787535b1a87b..0000000000000000000000000000000000000000 --- a/spaces/ChallengeHub/Chinese-LangChain/tests/test_duckpy.py +++ /dev/null @@ -1,15 +0,0 @@ -from duckpy import Client - -client = Client() - -results = client.search("Python Wikipedia") - -# Prints first result title -print(results[0].title) - -# Prints first result URL -print(results[0].url) - -# Prints first result description -print(results[0].description) -# https://github.com/AmanoTeam/duckpy \ No newline at end of file diff --git a/spaces/ChandraMohanNayal/AutoGPT/autogpt/config/singleton.py b/spaces/ChandraMohanNayal/AutoGPT/autogpt/config/singleton.py deleted file mode 100644 index 55b2aeea120bbe51ca837265fcb7fbff467e55f2..0000000000000000000000000000000000000000 --- a/spaces/ChandraMohanNayal/AutoGPT/autogpt/config/singleton.py +++ /dev/null @@ -1,24 +0,0 @@ -"""The singleton metaclass for ensuring only one instance of a class.""" -import abc - - -class Singleton(abc.ABCMeta, type): - """ - Singleton metaclass for ensuring only one instance of a class. - """ - - _instances = {} - - def __call__(cls, *args, **kwargs): - """Call method for the singleton metaclass.""" - if cls not in cls._instances: - cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) - return cls._instances[cls] - - -class AbstractSingleton(abc.ABC, metaclass=Singleton): - """ - Abstract singleton class for ensuring only one instance of a class. - """ - - pass diff --git a/spaces/CjangCjengh/Shanghainese-TTS/monotonic_align/__init__.py b/spaces/CjangCjengh/Shanghainese-TTS/monotonic_align/__init__.py deleted file mode 100644 index 49e32c9a128aeadc2044c362ff27f6a43f6d7815..0000000000000000000000000000000000000000 --- a/spaces/CjangCjengh/Shanghainese-TTS/monotonic_align/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -from numpy import zeros, int32, float32 -from torch import from_numpy - -from .core import maximum_path_jit - -def maximum_path(neg_cent, mask): - """ numba optimized version. 
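A quick illustration of the `Singleton` metaclass deleted above; `AppConfig` is a made-up example class, not part of the original module:

class AppConfig(metaclass=Singleton):
    """Process-wide settings; every call to AppConfig() returns the same object."""
    def __init__(self):
        self.debug_mode = False

first = AppConfig()
second = AppConfig()
assert first is second            # only one instance is ever created
first.debug_mode = True
assert second.debug_mode is True  # state is shared through that single instance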
- neg_cent: [b, t_t, t_s] - mask: [b, t_t, t_s] - """ - device = neg_cent.device - dtype = neg_cent.dtype - neg_cent = neg_cent.data.cpu().numpy().astype(float32) - path = zeros(neg_cent.shape, dtype=int32) - - t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(int32) - t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(int32) - maximum_path_jit(path, neg_cent, t_t_max, t_s_max) - return from_numpy(path).to(device=device, dtype=dtype) diff --git a/spaces/Clementapa/orang-outan-image-video-detection/style.css b/spaces/Clementapa/orang-outan-image-video-detection/style.css deleted file mode 100644 index 6b8b21e65e3caf32a7bd24814eb6b27d41faf46b..0000000000000000000000000000000000000000 --- a/spaces/Clementapa/orang-outan-image-video-detection/style.css +++ /dev/null @@ -1,10 +0,0 @@ -#disp_image { - text-align: center; - /* Horizontally center the content */ -} - -#duplicate-button { - margin-left: auto; - color: #fff; - background: #1565c0; -} \ No newline at end of file diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-d80d0bbf.js b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-d80d0bbf.js deleted file mode 100644 index f98fb1843cc55d149be4830c5fe78570b0729e95..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-d80d0bbf.js +++ /dev/null @@ -1,2 +0,0 @@ -import{S as N,e as O,s as P,N as G,O as H,k as Q,K as r,p as j,o as R,Q as K,z as D,v as I,A as q,x as T,a1 as J,B as V,a9 as L,ab as M,ac as Y,ad as Z,h as x,a4 as p,at as $,au as ee,P as le,R as ie,a7 as ne,F as te}from"./index-1d65707a.js";import{a as ae}from"./Button-f155035a.js";import{b as se}from"./ModifyUpload.svelte_svelte_type_style_lang-d2acacf0.js";import{X as fe}from"./Blocks-c9e1499d.js";function ue(l){let e;const i=l[17].default,n=L(i,l,l[19],null);return{c(){n&&n.c()},m(s,u){n&&n.m(s,u),e=!0},p(s,u){n&&n.p&&(!e||u&524288)&&M(n,i,s,s[19],e?Z(i,s[19],u,null):Y(s[19]),null)},i(s){e||(D(n,s),e=!0)},o(s){I(n,s),e=!1},d(s){n&&n.d(s)}}}function _e(l){let e,i,n,s,u,h,c,m,d,g;return c=new ae({props:{size:l[4],variant:l[8],elem_id:l[0],elem_classes:l[1],visible:l[2],scale:l[5],min_width:l[6],disabled:l[7]==="static",$$slots:{default:[ue]},$$scope:{ctx:l}}}),c.$on("click",l[12]),{c(){e=G("input"),h=H(),Q(c.$$.fragment),r(e,"class","hide svelte-ydeks8"),r(e,"accept",l[11]),r(e,"type","file"),e.multiple=i=l[3]==="multiple"||void 0,r(e,"webkitdirectory",n=l[3]==="directory"||void 0),r(e,"mozdirectory",s=l[3]==="directory"||void 0),r(e,"data-testid",u=l[9]+"-upload-button")},m(f,_){j(f,e,_),l[18](e),j(f,h,_),R(c,f,_),m=!0,d||(g=[K(e,"change",l[13]),K(e,"click",l[14])],d=!0)},p(f,[_]){(!m||_&2048)&&r(e,"accept",f[11]),(!m||_&8&&i!==(i=f[3]==="multiple"||void 0))&&(e.multiple=i),(!m||_&8&&n!==(n=f[3]==="directory"||void 0))&&r(e,"webkitdirectory",n),(!m||_&8&&s!==(s=f[3]==="directory"||void 0))&&r(e,"mozdirectory",s),(!m||_&512&&u!==(u=f[9]+"-upload-button"))&&r(e,"data-testid",u);const o={};_&16&&(o.size=f[4]),_&256&&(o.variant=f[8]),_&1&&(o.elem_id=f[0]),_&2&&(o.elem_classes=f[1]),_&4&&(o.visible=f[2]),_&32&&(o.scale=f[5]),_&64&&(o.min_width=f[6]),_&128&&(o.disabled=f[7]==="static"),_&524288&&(o.$$scope={dirty:_,ctx:f}),c.$set(o)},i(f){m||(D(c.$$.fragment,f),m=!0)},o(f){I(c.$$.fragment,f),m=!1},d(f){f&&(q(e),q(h)),l[18](null),T(c,f),d=!1,J(g)}}}function 
me(l,e,i){let{$$slots:n={},$$scope:s}=e,{elem_id:u=""}=e,{elem_classes:h=[]}=e,{visible:c=!0}=e,{file_count:m}=e,{file_types:d=[]}=e,{include_file_metadata:g=!0}=e,{size:f="lg"}=e,{scale:_=null}=e,{min_width:o=void 0}=e,{mode:k="dynamic"}=e,{variant:A="secondary"}=e,{label:B}=e,y;const E=V();let v;d==null?v=null:(d=d.map(t=>t.startsWith(".")?t:t+"/*"),v=d.join(", "));const C=()=>{y.click()},a=t=>{let w=Array.from(t);if(t.length){m==="single"&&(w=[t[0]]);var U=[];w.forEach((F,W)=>{U[W]=g?{name:F.name,size:F.size,data:"",blob:F}:F,U.filter(X=>X!==void 0).length===t.length&&E("load",m=="single"?U[0]:U)})}},S=t=>{const w=t.target;w.files&&a(w.files)},z=t=>{const w=t.target;w.value&&(w.value="")};function b(t){x[t?"unshift":"push"](()=>{y=t,i(10,y)})}return l.$$set=t=>{"elem_id"in t&&i(0,u=t.elem_id),"elem_classes"in t&&i(1,h=t.elem_classes),"visible"in t&&i(2,c=t.visible),"file_count"in t&&i(3,m=t.file_count),"file_types"in t&&i(15,d=t.file_types),"include_file_metadata"in t&&i(16,g=t.include_file_metadata),"size"in t&&i(4,f=t.size),"scale"in t&&i(5,_=t.scale),"min_width"in t&&i(6,o=t.min_width),"mode"in t&&i(7,k=t.mode),"variant"in t&&i(8,A=t.variant),"label"in t&&i(9,B=t.label),"$$scope"in t&&i(19,s=t.$$scope)},[u,h,c,m,f,_,o,k,A,B,y,v,C,S,z,d,g,n,b,s]}class oe extends N{constructor(e){super(),O(this,e,me,_e,P,{elem_id:0,elem_classes:1,visible:2,file_count:3,file_types:15,include_file_metadata:16,size:4,scale:5,min_width:6,mode:7,variant:8,label:9})}}function ce(l){let e=l[11](l[3])+"",i;return{c(){i=le(e)},m(n,s){j(n,i,s)},p(n,s){s&2056&&e!==(e=n[11](n[3])+"")&&ie(i,e)},d(n){n&&q(i)}}}function de(l){let e,i;return e=new oe({props:{elem_id:l[0],elem_classes:l[1],visible:l[2],file_count:l[4],file_types:l[5],size:l[6],scale:l[7],min_width:l[8],mode:l[9],variant:l[10],label:l[3],$$slots:{default:[ce]},$$scope:{ctx:l}}}),e.$on("click",l[15]),e.$on("load",l[12]),{c(){Q(e.$$.fragment)},m(n,s){R(e,n,s),i=!0},p(n,[s]){const u={};s&1&&(u.elem_id=n[0]),s&2&&(u.elem_classes=n[1]),s&4&&(u.visible=n[2]),s&16&&(u.file_count=n[4]),s&32&&(u.file_types=n[5]),s&64&&(u.size=n[6]),s&128&&(u.scale=n[7]),s&256&&(u.min_width=n[8]),s&512&&(u.mode=n[9]),s&1024&&(u.variant=n[10]),s&8&&(u.label=n[3]),s&264200&&(u.$$scope={dirty:s,ctx:n}),e.$set(u)},i(n){i||(D(e.$$.fragment,n),i=!0)},o(n){I(e.$$.fragment,n),i=!1},d(n){T(e,n)}}}function be(l,e,i){let n;p(l,fe,a=>i(11,n=a));let{elem_id:s=""}=e,{elem_classes:u=[]}=e,{visible:h=!0}=e,{label:c}=e,{value:m}=e,{file_count:d}=e,{file_types:g=[]}=e,{root:f}=e,{size:_="lg"}=e,{scale:o=null}=e,{min_width:k=void 0}=e,{mode:A="dynamic"}=e,{variant:B="secondary"}=e;const y=$("upload_files")??ee;async function E({detail:a}){i(13,m=a),await ne();let S=(Array.isArray(a)?a:[a]).map(z=>z.blob);y(f,S).then(async z=>{z.error?(Array.isArray(a)?a:[a]).forEach(async(b,t)=>{b.data=await se(b.blob),b.blob=void 0}):(Array.isArray(a)?a:[a]).forEach((b,t)=>{z.files&&(b.orig_name=b.name,b.name=z.files[t],b.is_file=!0,b.blob=void 0)}),v("change",m),v("upload",a)})}const v=V();function C(a){te.call(this,l,a)}return l.$$set=a=>{"elem_id"in a&&i(0,s=a.elem_id),"elem_classes"in a&&i(1,u=a.elem_classes),"visible"in a&&i(2,h=a.visible),"label"in a&&i(3,c=a.label),"value"in a&&i(13,m=a.value),"file_count"in a&&i(4,d=a.file_count),"file_types"in a&&i(5,g=a.file_types),"root"in a&&i(14,f=a.root),"size"in a&&i(6,_=a.size),"scale"in a&&i(7,o=a.scale),"min_width"in a&&i(8,k=a.min_width),"mode"in a&&i(9,A=a.mode),"variant"in a&&i(10,B=a.variant)},[s,u,h,c,d,g,_,o,k,A,B,n,E,m,f,C]}class re extends 
N{constructor(e){super(),O(this,e,be,de,P,{elem_id:0,elem_classes:1,visible:2,label:3,value:13,file_count:4,file_types:5,root:14,size:6,scale:7,min_width:8,mode:9,variant:10})}}const ye=re,ve=["static","dynamic"];export{ye as Component,ve as modes}; -//# sourceMappingURL=index-d80d0bbf.js.map diff --git a/spaces/Datasculptor/MusicGen/audiocraft/utils/export.py b/spaces/Datasculptor/MusicGen/audiocraft/utils/export.py deleted file mode 100644 index b513b52267f7bf5aae09282c15b0a2e20c8a8fee..0000000000000000000000000000000000000000 --- a/spaces/Datasculptor/MusicGen/audiocraft/utils/export.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Utility to export a training checkpoint to a lightweight release checkpoint. -""" - -from pathlib import Path -import typing as tp - -from omegaconf import OmegaConf, DictConfig -import torch - - -def _clean_lm_cfg(cfg: DictConfig): - OmegaConf.set_struct(cfg, False) - # This used to be set automatically in the LM solver, need a more robust solution - # for the future. - cfg['transformer_lm']['card'] = 2048 - cfg['transformer_lm']['n_q'] = 4 - # Experimental params no longer supported. - bad_params = ['spectral_norm_attn_iters', 'spectral_norm_ff_iters', - 'residual_balancer_attn', 'residual_balancer_ff', 'layer_drop'] - for name in bad_params: - del cfg['transformer_lm'][name] - OmegaConf.set_struct(cfg, True) - return cfg - - -def export_encodec(checkpoint_path: tp.Union[Path, str], out_folder: tp.Union[Path, str]): - sig = Path(checkpoint_path).parent.name - assert len(sig) == 8, "Not a valid Dora signature" - pkg = torch.load(checkpoint_path, 'cpu') - new_pkg = { - 'best_state': pkg['ema']['state']['model'], - 'xp.cfg': OmegaConf.to_yaml(pkg['xp.cfg']), - } - out_file = Path(out_folder) / f'{sig}.th' - torch.save(new_pkg, out_file) - return out_file - - -def export_lm(checkpoint_path: tp.Union[Path, str], out_folder: tp.Union[Path, str]): - sig = Path(checkpoint_path).parent.name - assert len(sig) == 8, "Not a valid Dora signature" - pkg = torch.load(checkpoint_path, 'cpu') - new_pkg = { - 'best_state': pkg['fsdp_best_state']['model'], - 'xp.cfg': OmegaConf.to_yaml(_clean_lm_cfg(pkg['xp.cfg'])) - } - out_file = Path(out_folder) / f'{sig}.th' - torch.save(new_pkg, out_file) - return out_file diff --git a/spaces/Deci/DeciLM-6b-instruct/app.py b/spaces/Deci/DeciLM-6b-instruct/app.py deleted file mode 100644 index d354d6d12b35023bf63ead894d19dd6ecc1f463a..0000000000000000000000000000000000000000 --- a/spaces/Deci/DeciLM-6b-instruct/app.py +++ /dev/null @@ -1,136 +0,0 @@ -import os -import gradio as gr -import torch -from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer - -token = os.environ["HUGGINGFACEHUB_API_TOKEN"] - -model_id = 'Deci/DeciLM-6b-instruct' - -SYSTEM_PROMPT_TEMPLATE = """Below is an instruction that describes a task. Write a response that appropriately completes the request. - -### Instruction: - -{instruction} - -### Response: -""" - -DESCRIPTION = """ -#

    🤖 DeciLM-6B-Instruct: A Fast Instruction-Tuned Model💨

    -Welcome to DeciLM-6B-Instruct! DeciLM-6B-Instruct is a 6B-parameter instruction-tuned language model released under the Llama license. Because it is instruction-tuned rather than chat-tuned, prompt it with an instruction that describes a task, and it will respond with an appropriate completion of that task. -

    Learn more about the base model DeciLM-6B.

    -""" - -if not torch.cuda.is_available(): - DESCRIPTION += 'You need a GPU for this example. Try using colab: https://bit.ly/decilm-instruct-nb' - -if torch.cuda.is_available(): - model = AutoModelForCausalLM.from_pretrained( - model_id, - torch_dtype=torch.float16, - device_map='auto', - trust_remote_code=True, - use_auth_token=token - ) -else: - model = None - -tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=token) -tokenizer.pad_token = tokenizer.eos_token - -# Function to construct the prompt using the new system prompt template -def get_prompt_with_template(message: str) -> str: - return SYSTEM_PROMPT_TEMPLATE.format(instruction=message) - -# Function to generate the model's response -def generate_model_response(message: str) -> str: - prompt = get_prompt_with_template(message) - inputs = tokenizer(prompt, return_tensors='pt') - if torch.cuda.is_available(): - inputs = inputs.to('cuda') - # Include **generate_kwargs to include the user-defined options - output = model.generate(**inputs, - max_new_tokens=3000, - num_beams=2, - no_repeat_ngram_size=4, - early_stopping=True, - do_sample=True - ) - return tokenizer.decode(output[0], skip_special_tokens=True) - -# Function to extract the content after "### Response:" -def extract_response_content(full_response: str, ) -> str: - response_start_index = full_response.find("### Response:") - if response_start_index != -1: - return full_response[response_start_index + len("### Response:"):].strip() - else: - return full_response - -# The main function that uses the dynamic generate_kwargs -def get_response_with_template(message: str) -> str: - full_response = generate_model_response(message) - return extract_response_content(full_response) - -with gr.Blocks(css="style.css") as demo: - gr.Markdown(DESCRIPTION) - gr.DuplicateButton(value='Duplicate Space for private use', - elem_id='duplicate-button') - with gr.Group(): - chatbot = gr.Textbox(label='DeciLM-6B-Instruct Output:') - with gr.Row(): - textbox = gr.Textbox( - container=False, - show_label=False, - placeholder='Type an instruction...', - scale=10, - elem_id="textbox" - ) - submit_button = gr.Button( - '💬 Submit', - variant='primary', - scale=1, - min_width=0, - elem_id="submit_button" - ) - - # Clear button to clear the chat history - clear_button = gr.Button( - '🗑️ Clear', - variant='secondary', - ) - - clear_button.click( - fn=lambda: ('',''), - outputs=[textbox, chatbot], - queue=False, - api_name=False, - ) - - submit_button.click( - fn=get_response_with_template, - inputs=textbox, - outputs= chatbot, - queue=False, - api_name=False, - ) - - gr.Examples( - examples=[ - 'Write detailed instructions for making chocolate chip pancakes.', - 'Write a 250-word article about your love of pancakes.', - 'Explain the plot of Back to the Future in three sentences.', - 'How do I make a trap beat?', - 'A step-by-step guide to learning Python in one month.', - ], - inputs=textbox, - outputs=chatbot, - fn=get_response_with_template, - cache_examples=True, - elem_id="examples" - ) - - - gr.HTML(label="Keep in touch", value="Keep in touch") - -demo.launch() \ No newline at end of file diff --git a/spaces/DragGan/DragGan-Inversion/PTI/training/projectors/w_projector.py b/spaces/DragGan/DragGan-Inversion/PTI/training/projectors/w_projector.py deleted file mode 100644 index a4caffc368f87e06b41eaac2807a273079708840..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan-Inversion/PTI/training/projectors/w_projector.py +++ /dev/null @@ -1,142 +0,0 @@ -# Copyright (c) 
2021, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Project given image to the latent space of pretrained network pickle.""" - -import copy -import wandb -import numpy as np -import torch -import torch.nn.functional as F -from tqdm import tqdm -from PTI.configs import global_config, hyperparameters -from PTI.utils import log_utils -import dnnlib - - -def project( - G, - target: torch.Tensor, # [C,H,W] and dynamic range [0,255], W & H must match G output resolution - *, - num_steps=1000, - w_avg_samples=10000, - initial_learning_rate=0.01, - initial_noise_factor=0.05, - lr_rampdown_length=0.25, - lr_rampup_length=0.05, - noise_ramp_length=0.75, - regularize_noise_weight=1e5, - verbose=False, - device: torch.device, - use_wandb=False, - initial_w=None, - image_log_step=global_config.image_rec_result_log_snapshot, - w_name: str -): - assert target.shape == (G.img_channels, G.img_resolution, G.img_resolution),print(target.shape,G.img_resolution) - - def logprint(*args): - if verbose: - print(*args) - - G = copy.deepcopy(G).eval().requires_grad_(False).to(device).float() # type: ignore - - # Compute w stats. - logprint(f'Computing W midpoint and stddev using {w_avg_samples} samples...') - z_samples = np.random.RandomState(123).randn(w_avg_samples, G.z_dim) - w_samples = G.mapping(torch.from_numpy(z_samples).to(device), None) # [N, L, C] - w_samples = w_samples[:, :1, :].cpu().numpy().astype(np.float32) # [N, 1, C] - w_avg = np.mean(w_samples, axis=0, keepdims=True) # [1, 1, C] - w_avg_tensor = torch.from_numpy(w_avg).to(global_config.device) - w_std = (np.sum((w_samples - w_avg) ** 2) / w_avg_samples) ** 0.5 - - start_w = initial_w if initial_w is not None else w_avg - - # Setup noise inputs. - noise_bufs = {name: buf for (name, buf) in G.synthesis.named_buffers() if 'noise_const' in name} - - # Load VGG16 feature detector. - url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt' - with dnnlib.util.open_url(url) as f: - vgg16 = torch.jit.load(f).eval().to(device) - - # Features for target image. - target_images = target.unsqueeze(0).to(device).to(torch.float32) - if target_images.shape[2] > 256: - target_images = F.interpolate(target_images, size=(256, 256), mode='area') - target_features = vgg16(target_images, resize_images=False, return_lpips=True) - - w_opt = torch.tensor(start_w, dtype=torch.float32, device=device, - requires_grad=True) # pylint: disable=not-callable - optimizer = torch.optim.Adam([w_opt] + list(noise_bufs.values()), betas=(0.9, 0.999), - lr=hyperparameters.first_inv_lr) - - # Init noise. - for buf in noise_bufs.values(): - buf[:] = torch.randn_like(buf) - buf.requires_grad = True - - for step in tqdm(range(num_steps)): - - # Learning rate schedule. - t = step / num_steps - w_noise_scale = w_std * initial_noise_factor * max(0.0, 1.0 - t / noise_ramp_length) ** 2 - lr_ramp = min(1.0, (1.0 - t) / lr_rampdown_length) - lr_ramp = 0.5 - 0.5 * np.cos(lr_ramp * np.pi) - lr_ramp = lr_ramp * min(1.0, t / lr_rampup_length) - lr = initial_learning_rate * lr_ramp - for param_group in optimizer.param_groups: - param_group['lr'] = lr - - # Synth images from opt_w. 
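        # (Illustrative summary of the schedule above, assuming this function's
        # default arguments: the learning rate ramps up from 0 over the first 5%
        # of steps, holds at initial_learning_rate (0.01), cosine-decays to 0
        # over the final 25%, and the injected W-space noise w_noise_scale
        # anneals to zero after 75% of the steps.)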
- w_noise = torch.randn_like(w_opt) * w_noise_scale - ws = (w_opt + w_noise).repeat([1, G.mapping.num_ws, 1]) - synth_images = G.synthesis(ws, noise_mode='const', force_fp32=True) - - # Downsample image to 256x256 if it's larger than that. VGG was built for 224x224 images. - synth_images = (synth_images + 1) * (255 / 2) - if synth_images.shape[2] > 256: - synth_images = F.interpolate(synth_images, size=(256, 256), mode='area') - - # Features for synth images. - synth_features = vgg16(synth_images, resize_images=False, return_lpips=True) - dist = (target_features - synth_features).square().sum() - - # Noise regularization. - reg_loss = 0.0 - for v in noise_bufs.values(): - noise = v[None, None, :, :] # must be [1,1,H,W] for F.avg_pool2d() - while True: - reg_loss += (noise * torch.roll(noise, shifts=1, dims=3)).mean() ** 2 - reg_loss += (noise * torch.roll(noise, shifts=1, dims=2)).mean() ** 2 - if noise.shape[2] <= 8: - break - noise = F.avg_pool2d(noise, kernel_size=2) - loss = dist + reg_loss * regularize_noise_weight - - if step % image_log_step == 0: - with torch.no_grad(): - if use_wandb: - global_config.training_step += 1 - wandb.log({f'first projection _{w_name}': loss.detach().cpu()}, step=global_config.training_step) - log_utils.log_image_from_w(w_opt.repeat([1, G.mapping.num_ws, 1]), G, w_name) - - # Step - optimizer.zero_grad(set_to_none=True) - loss.backward() - optimizer.step() - logprint(f'step {step + 1:>4d}/{num_steps}: dist {dist:<4.2f} loss {float(loss):<5.2f}') - - # Normalize noise. - with torch.no_grad(): - for buf in noise_bufs.values(): - buf -= buf.mean() - buf *= buf.square().mean().rsqrt() - - del G - return w_opt.repeat([1, 18, 1]) diff --git a/spaces/DragGan/DragGan/stylegan_human/torch_utils/ops/conv2d_gradfix.py b/spaces/DragGan/DragGan/stylegan_human/torch_utils/ops/conv2d_gradfix.py deleted file mode 100644 index 093036b728336d6f2f593aaea187054a8af8d523..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan/stylegan_human/torch_utils/ops/conv2d_gradfix.py +++ /dev/null @@ -1,172 +0,0 @@ -# Copyright (c) SenseTime Research. All rights reserved. - -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Custom replacement for `torch.nn.functional.conv2d` that supports -arbitrarily high order gradients with zero performance penalty.""" - -import warnings -import contextlib -import torch - -# pylint: disable=redefined-builtin -# pylint: disable=arguments-differ -# pylint: disable=protected-access - -#---------------------------------------------------------------------------- - -enabled = False # Enable the custom op by setting this to true. -weight_gradients_disabled = False # Forcefully disable computation of gradients with respect to the weights. 
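A minimal usage sketch for these module-level switches; the discriminator and image batch are placeholders, and the R1-penalty setting mirrors how StyleGAN2-style training code typically wraps gradient-penalty computation, not code from this file:

import torch
from torch_utils.ops import conv2d_gradfix

conv2d_gradfix.enabled = True  # opt in to the custom, gradfix-aware conv ops

def r1_penalty(discriminator, real_images):
    real_images = real_images.detach().requires_grad_(True)
    # Skip weight gradients while differentiating w.r.t. the input images only.
    with conv2d_gradfix.no_weight_gradients():
        scores = discriminator(real_images)
        grads, = torch.autograd.grad(
            outputs=scores.sum(), inputs=real_images, create_graph=True
        )
    return grads.square().sum(dim=[1, 2, 3]).mean()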
- -@contextlib.contextmanager -def no_weight_gradients(): - global weight_gradients_disabled - old = weight_gradients_disabled - weight_gradients_disabled = True - yield - weight_gradients_disabled = old - -#---------------------------------------------------------------------------- - -def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1): - if _should_use_custom_op(input): - return _conv2d_gradfix(transpose=False, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=0, dilation=dilation, groups=groups).apply(input, weight, bias) - return torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups) - -def conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1): - if _should_use_custom_op(input): - return _conv2d_gradfix(transpose=True, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation).apply(input, weight, bias) - return torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation) - -#---------------------------------------------------------------------------- - -def _should_use_custom_op(input): - assert isinstance(input, torch.Tensor) - if (not enabled) or (not torch.backends.cudnn.enabled): - return False - if input.device.type != 'cuda': - return False - if any(torch.__version__.startswith(x) for x in ['1.7.', '1.8.', '1.9']): - return True - warnings.warn(f'conv2d_gradfix not supported on PyTorch {torch.__version__}. Falling back to torch.nn.functional.conv2d().') - return False - -def _tuple_of_ints(xs, ndim): - xs = tuple(xs) if isinstance(xs, (tuple, list)) else (xs,) * ndim - assert len(xs) == ndim - assert all(isinstance(x, int) for x in xs) - return xs - -#---------------------------------------------------------------------------- - -_conv2d_gradfix_cache = dict() - -def _conv2d_gradfix(transpose, weight_shape, stride, padding, output_padding, dilation, groups): - # Parse arguments. - ndim = 2 - weight_shape = tuple(weight_shape) - stride = _tuple_of_ints(stride, ndim) - padding = _tuple_of_ints(padding, ndim) - output_padding = _tuple_of_ints(output_padding, ndim) - dilation = _tuple_of_ints(dilation, ndim) - - # Lookup from cache. - key = (transpose, weight_shape, stride, padding, output_padding, dilation, groups) - if key in _conv2d_gradfix_cache: - return _conv2d_gradfix_cache[key] - - # Validate arguments. - assert groups >= 1 - assert len(weight_shape) == ndim + 2 - assert all(stride[i] >= 1 for i in range(ndim)) - assert all(padding[i] >= 0 for i in range(ndim)) - assert all(dilation[i] >= 0 for i in range(ndim)) - if not transpose: - assert all(output_padding[i] == 0 for i in range(ndim)) - else: # transpose - assert all(0 <= output_padding[i] < max(stride[i], dilation[i]) for i in range(ndim)) - - # Helpers. - common_kwargs = dict(stride=stride, padding=padding, dilation=dilation, groups=groups) - def calc_output_padding(input_shape, output_shape): - if transpose: - return [0, 0] - return [ - input_shape[i + 2] - - (output_shape[i + 2] - 1) * stride[i] - - (1 - 2 * padding[i]) - - dilation[i] * (weight_shape[i + 2] - 1) - for i in range(ndim) - ] - - # Forward & backward. 
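# The two autograd.Function subclasses below implement the actual trick: Conv2d.forward is an
# ordinary conv2d / conv_transpose2d call; its backward computes grad_input with another
# _conv2d_gradfix op of the opposite transposedness (using calc_output_padding above to recover
# the spatial size) and delegates grad_weight to Conv2dGradWeight, which invokes the cuDNN
# backward-weight kernel directly. Since both of these are themselves differentiable
# autograd.Functions, arbitrarily high-order gradients keep reusing the same kernels instead of
# falling back to autograd's generic convolution backward.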
- class Conv2d(torch.autograd.Function): - @staticmethod - def forward(ctx, input, weight, bias): - assert weight.shape == weight_shape - if not transpose: - output = torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, **common_kwargs) - else: # transpose - output = torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, output_padding=output_padding, **common_kwargs) - ctx.save_for_backward(input, weight) - return output - - @staticmethod - def backward(ctx, grad_output): - input, weight = ctx.saved_tensors - grad_input = None - grad_weight = None - grad_bias = None - - if ctx.needs_input_grad[0]: - p = calc_output_padding(input_shape=input.shape, output_shape=grad_output.shape) - grad_input = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs).apply(grad_output, weight, None) - assert grad_input.shape == input.shape - - if ctx.needs_input_grad[1] and not weight_gradients_disabled: - grad_weight = Conv2dGradWeight.apply(grad_output, input) - assert grad_weight.shape == weight_shape - - if ctx.needs_input_grad[2]: - grad_bias = grad_output.sum([0, 2, 3]) - - return grad_input, grad_weight, grad_bias - - # Gradient with respect to the weights. - class Conv2dGradWeight(torch.autograd.Function): - @staticmethod - def forward(ctx, grad_output, input): - op = torch._C._jit_get_operation('aten::cudnn_convolution_backward_weight' if not transpose else 'aten::cudnn_convolution_transpose_backward_weight') - flags = [torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic, torch.backends.cudnn.allow_tf32] - grad_weight = op(weight_shape, grad_output, input, padding, stride, dilation, groups, *flags) - assert grad_weight.shape == weight_shape - ctx.save_for_backward(grad_output, input) - return grad_weight - - @staticmethod - def backward(ctx, grad2_grad_weight): - grad_output, input = ctx.saved_tensors - grad2_grad_output = None - grad2_input = None - - if ctx.needs_input_grad[0]: - grad2_grad_output = Conv2d.apply(input, grad2_grad_weight, None) - assert grad2_grad_output.shape == grad_output.shape - - if ctx.needs_input_grad[1]: - p = calc_output_padding(input_shape=input.shape, output_shape=grad_output.shape) - grad2_input = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs).apply(grad_output, grad2_grad_weight, None) - assert grad2_input.shape == input.shape - - return grad2_grad_output, grad2_input - - _conv2d_gradfix_cache[key] = Conv2d - return Conv2d - -#---------------------------------------------------------------------------- diff --git a/spaces/EPFL-VILAB/MultiMAE/mask2former/modeling/pixel_decoder/ops/src/cuda/ms_deform_attn_cuda.h b/spaces/EPFL-VILAB/MultiMAE/mask2former/modeling/pixel_decoder/ops/src/cuda/ms_deform_attn_cuda.h deleted file mode 100644 index 4f0658e8668a11f0e7d71deff9adac71884f2e87..0000000000000000000000000000000000000000 --- a/spaces/EPFL-VILAB/MultiMAE/mask2former/modeling/pixel_decoder/ops/src/cuda/ms_deform_attn_cuda.h +++ /dev/null @@ -1,35 +0,0 @@ -/*! -************************************************************************************************** -* Deformable DETR -* Copyright (c) 2020 SenseTime. All Rights Reserved. 
-* Licensed under the Apache License, Version 2.0 [see LICENSE for details] -************************************************************************************************** -* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 -************************************************************************************************** -*/ - -/*! -* Copyright (c) Facebook, Inc. and its affiliates. -* Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR -*/ - -#pragma once -#include - -at::Tensor ms_deform_attn_cuda_forward( - const at::Tensor &value, - const at::Tensor &spatial_shapes, - const at::Tensor &level_start_index, - const at::Tensor &sampling_loc, - const at::Tensor &attn_weight, - const int im2col_step); - -std::vector ms_deform_attn_cuda_backward( - const at::Tensor &value, - const at::Tensor &spatial_shapes, - const at::Tensor &level_start_index, - const at::Tensor &sampling_loc, - const at::Tensor &attn_weight, - const at::Tensor &grad_output, - const int im2col_step); - diff --git a/spaces/Eddycrack864/Applio-Inference/julius/utils.py b/spaces/Eddycrack864/Applio-Inference/julius/utils.py deleted file mode 100644 index 944b973ad1a38700c1ba98ab7306c233cb87868d..0000000000000000000000000000000000000000 --- a/spaces/Eddycrack864/Applio-Inference/julius/utils.py +++ /dev/null @@ -1,101 +0,0 @@ -# File under the MIT license, see https://github.com/adefossez/julius/LICENSE for details. -# Author: adefossez, 2020 -""" -Non signal processing related utilities. -""" - -import inspect -import typing as tp -import sys -import time - - -def simple_repr(obj, attrs: tp.Optional[tp.Sequence[str]] = None, - overrides: dict = {}): - """ - Return a simple representation string for `obj`. - If `attrs` is not None, it should be a list of attributes to include. - """ - params = inspect.signature(obj.__class__).parameters - attrs_repr = [] - if attrs is None: - attrs = list(params.keys()) - for attr in attrs: - display = False - if attr in overrides: - value = overrides[attr] - elif hasattr(obj, attr): - value = getattr(obj, attr) - else: - continue - if attr in params: - param = params[attr] - if param.default is inspect._empty or value != param.default: # type: ignore - display = True - else: - display = True - - if display: - attrs_repr.append(f"{attr}={value}") - return f"{obj.__class__.__name__}({','.join(attrs_repr)})" - - -class MarkdownTable: - """ - Simple MarkdownTable generator. The column titles should be large enough - for the lines content. This will right align everything. - - >>> import io # we use io purely for test purposes, default is sys.stdout. 
- >>> file = io.StringIO() - >>> table = MarkdownTable(["Item Name", "Price"], file=file) - >>> table.header(); table.line(["Honey", "5"]); table.line(["Car", "5,000"]) - >>> print(file.getvalue().strip()) # Strip for test purposes - | Item Name | Price | - |-----------|-------| - | Honey | 5 | - | Car | 5,000 | - """ - def __init__(self, columns, file=sys.stdout): - self.columns = columns - self.file = file - - def _writeln(self, line): - self.file.write("|" + "|".join(line) + "|\n") - - def header(self): - self._writeln(f" {col} " for col in self.columns) - self._writeln("-" * (len(col) + 2) for col in self.columns) - - def line(self, line): - out = [] - for val, col in zip(line, self.columns): - val = format(val, '>' + str(len(col))) - out.append(" " + val + " ") - self._writeln(out) - - -class Chrono: - """ - Measures ellapsed time, calling `torch.cuda.synchronize` if necessary. - `Chrono` instances can be used as context managers (e.g. with `with`). - Upon exit of the block, you can access the duration of the block in seconds - with the `duration` attribute. - - >>> with Chrono() as chrono: - ... _ = sum(range(10_000)) - ... - >>> print(chrono.duration < 10) # Should be true unless on a really slow computer. - True - """ - def __init__(self): - self.duration = None - - def __enter__(self): - self._begin = time.time() - return self - - def __exit__(self, exc_type, exc_value, exc_tracebck): - import torch - if torch.cuda.is_available(): - torch.cuda.synchronize() - self.duration = time.time() - self._begin diff --git a/spaces/Edisonymy/buy-or-rent/src/mainbody.py b/spaces/Edisonymy/buy-or-rent/src/mainbody.py deleted file mode 100644 index 008bd9eb393ffb637015b3f92f15b5e729ec72ff..0000000000000000000000000000000000000000 --- a/spaces/Edisonymy/buy-or-rent/src/mainbody.py +++ /dev/null @@ -1,237 +0,0 @@ -import streamlit as st -import pandas as pd -import numpy as np -from buy_or_rent import Buy_or_Rent_Model -from scipy.stats import norm, skew -from utils.general import calculate_percentiles -from utils.streamlit_utils import sticky_bottom_bar -from plot import plot_hist_from_list -import hydralit_components as hc -import warnings - -warnings.simplefilter(action="ignore", category=FutureWarning) - - -def generate_main_body( - model: Buy_or_Rent_Model, - mortgage_interest_annual_list=np.array([0.05]), - property_price_growth_annual_list=np.array([0.026]), - rent_increase_list=np.array([0.01325]), - investment_return_annual_list=np.array([0.06]), - years_until_sell_list=np.array([20]), -): - - adjust_for_inflation_bool = st.sidebar.toggle("Adjust for inflation (2% a year)") - # use_present_value = st.toggle('Use present value instead of future value') - # define what option labels and icons to display - option_data = [ - {"icon": "bi bi-calculator", "label": "Typical Outcome"}, - {"icon": "bi bi-bar-chart-line", "label": "Simulation Results"}, - ] - - # override the theme, else it will use the Streamlit applied theme - over_theme = { - "txc_inactive": "black", - "menu_background": "#b8d7ed", - "txc_active": "black", - "option_active": "white", - } - font_fmt = {"font-class": "h2", "font-size": "100%"} - - # display a horizontal version of the option bar - op = hc.option_bar( - option_definition=option_data, - key="PrimaryOption", - override_theme=over_theme, - font_styling=font_fmt, - horizontal_orientation=True, - ) - n_samples_simulation = 1000 - - if op == "Simulation Results": - n_samples_simulation = st.slider( - "Number of Simulation Samples:", - min_value=500, - max_value=5000, - 
value=1000, - step=100, - ) - - model.samples_rent_increase = np.random.choice(rent_increase_list, n_samples_simulation) - model.samples_property_price_growth_annual = np.random.choice(property_price_growth_annual_list, n_samples_simulation) - model.samples_mortgage_interest_annual = np.random.choice(mortgage_interest_annual_list, n_samples_simulation) - model.samples_investment_return_annual = np.random.choice(investment_return_annual_list, n_samples_simulation) - model.samples_years_until_sell = np.random.choice(years_until_sell_list, n_samples_simulation) - - model.run_calculations(adjust_for_inflation_bool = adjust_for_inflation_bool) - #save simulation results - buying_npv_list = model.buying_npv - buying_fv_list = model.buying_fv - renting_fv_list = model.renting_fv - mortgage_interest_annual_list_chosen = model.samples_mortgage_interest_annual - property_price_growth_annual_list_chosen = model.samples_property_price_growth_annual - rent_increase_list_chosen = model.samples_rent_increase - investment_return_annual_list_chosen = model.samples_investment_return_annual - years_until_sell_list_chosen = model.samples_years_until_sell - - # typical scenario - model.samples_rent_increase = np.median(rent_increase_list) - model.samples_property_price_growth_annual = np.median(property_price_growth_annual_list) - model.samples_mortgage_interest_annual = np.median(mortgage_interest_annual_list) - model.samples_investment_return_annual = np.median(investment_return_annual_list) - model.samples_years_until_sell = int(np.median(years_until_sell_list)) - model.run_calculations(adjust_for_inflation_bool=adjust_for_inflation_bool) - - if model.buying_fv > model.renting_fv: - text = "Return is typically higher if you buy." - if np.std(buying_fv_list) > np.std(renting_fv_list): - text += " However, it is less risky if you rent." - else: - text += " It is also less risky if you buy." - else: - text = "Return is typically higher if you rent and invest the deposit." - if np.std(buying_fv_list) > np.std(renting_fv_list): - text += " It is also less risky if you rent." - else: - text += " However, it is less risky if you buy." - - sticky_bottom_bar(text) - - - - - if op == "Typical Outcome": - left_column, right_column = st.columns(2) - with left_column: - st.write( - f"### Buy - Asset future value after {model.samples_years_until_sell} years" - ) - st.markdown( - f"**Typical Total Asset Value: £{model.buying_fv:,.0f}**", - help="All components are converted to future value at the time of sale.", - ) - st.markdown(f"***Breakdown:***") - st.markdown(f" - Capital Invested (deposit): £{model.DEPOSIT:,.0f}") - st.markdown( - f" - Capital Invested (buying cost + stamp duty, if any): £{model.BUYING_COST_FLAT + model.STAMP_DUTY:,.0f}" - ) - st.markdown( - f" - Property Price at Sale: :green[£{model.future_house_price:,.0f}]", - help="Calculated using the property price growth rate set in the left sidebar.", - ) - st.markdown( - f" - Selling cost (including Capital Gains Tax): :red[ -£{model.SELLING_COST:,.0f}]", - help="Total expenses incurred when selling a property. 
These costs typically include real estate agent commissions, legal fees, advertising expenses, and any necessary repairs or renovations to prepare the property for sale.", - ) - st.markdown( - f" - Total maintenance and service costs: :red[ -£{model.fv_ongoing_cost:,.0f}]", - help="Future value at the time of sale for the total cost associated with maintaining and servicing a property, including expenses such as property management fees, maintenance fees, and other related charges. Assumed to grow at inflation rate. Future value is determined by the discount rate, which is assumed to be equal to the investment return.", - ) - if model.COUNTRY == "US": - st.markdown( - f" - Total property tax: :red[ -£{model.fv_property_tax:,.0f}]", - help="Future value at the time of sale for the total property tax paid", - ) - st.markdown( - f" - Total Mortgage Payments: :red[ -£{model.fv_mortgage_payments:,.0f}]", - help="This is higher than the sum of all mortgage payments since the payments are converted to their future value at the time of sale. Future value is determined by the discount rate, which is assumed to be equal to the investment return.", - ) - st.markdown( - f" - Total Rent Saved (future value at time of sale): :green[£{model.rent_fv:,.0f}]", - help="This is higher than the sum of all rent payments that would have been paid since the payments are converted to their future value at the time of sale. Future value is determined by the discount rate, which is assumed to be equal to the investment return.", - ) - - with right_column: - st.write( - f"### Rent and invest - Asset future value after {model.samples_years_until_sell} years" - ) - st.markdown( - f"**Typical Total Asset Value: £{model.renting_fv:,.0f}**", - help="All components are converted to future value at the time of sale.", - ) - st.markdown(f"***Breakdown:***") - st.markdown(f" - Capital Invested (deposit): £{model.DEPOSIT:,.0f}") - st.markdown( - f" - Capital Invested (buying cost + stamp duty, if any): £{model.BUYING_COST_FLAT + model.STAMP_DUTY:,.0f}" - ) - st.markdown( - f" - Capital Gains Tax: :red[-£{model.cgt_investment:,.0f}]", - help="Your tax rate is determined by the annual salary set in the left sidebar.", - ) - if ( - model.renting_fv - - (model.DEPOSIT + model.BUYING_COST_FLAT + model.STAMP_DUTY) - >= 0 - ): - st.markdown( - f" - Assumed Typical Capital Growth: :green[£{model.renting_fv - (model.DEPOSIT + model.BUYING_COST_FLAT + model.STAMP_DUTY):,.0f}]", - help="Calculated with the investment return rated provided in the left sidebar.", - ) - else: - st.markdown( - f" - Assumed Typical Capital Growth: :red[£{model.renting_fv - (model.DEPOSIT + model.BUYING_COST_FLAT + model.STAMP_DUTY):,.0f}]" - ) - - if op == "Simulation Results": - - - plot_hist_from_list( - [buying_fv_list, renting_fv_list], - st, - figsize=(7, 2), - legends=["Buying", "Renting"], - main_colors=["orange", "blue"], - title="Future Asset Value - Simulation Results", - xlabel="Asset Value", - ) - st.markdown( - "Simulation results for future asset value. 
Using future value at 'years until sell mean' in your assumptions.", - unsafe_allow_html=True, - ) - plot_hist_from_list( - [buying_npv_list], - st, - plot_below_zero=True, - clip=(0, None), - main_colors=["blue"], - secondary_color="orange", - title="Net Present Value of Buying - Simulation Results", - xlabel="Net Present Value of Buying", - ) - st.markdown( - "Negative = Renting is better; Positive = Buying is better.", - unsafe_allow_html=True, - ) - st.markdown( - "Net Present Value represents the net gain/loss that result in purchasing the property in present value. It is calculated as (PV of future house sale price - PV of rent saved - PV of mortgage payments - PV of ongoing costs - deposit - buying costs - stamp duty - PV of selling costs). If it is positive, then it is financially better to buy a property. Present value is calculated using a future discount rate equal to your assumed investment return. This is equivalent to assuming that any amount you save on rent or mortgage will be invested. ", - unsafe_allow_html=True, - ) - - results_dict = { - "buying_npv": buying_npv_list, - "mortgage_interest_annual": mortgage_interest_annual_list_chosen, - "property_price_growth_annual": property_price_growth_annual_list_chosen, - "rent_increase": rent_increase_list_chosen, - "investment_return_annual": investment_return_annual_list_chosen, - "years_until_sell": years_until_sell_list_chosen, - } - results_df = pd.DataFrame(results_dict) - percentiles_df = calculate_percentiles(buying_npv_list, model.DEPOSIT) - with st.expander("### Net Present Value Statistics", expanded=False): - st.write( - f'- Buying is better {100-percentiles_df.loc[5,"Percentile"]:.0f}% of the time' - ) - st.write(f"- Mean: £{np.mean(buying_npv_list):,.0f}") - st.write( - f"- Mean (as % of deposit): {np.mean(buying_npv_list)/model.DEPOSIT*100:.0f}%" - ) - st.write(f"- Standard Deviation: £{np.std(buying_npv_list):,.0f}") - st.write( - f"- Standard Deviation (as % of deposit): {np.std(buying_npv_list)/model.DEPOSIT*100:.0f}%" - ) - st.write(f"- Skew: {skew(buying_npv_list):.2f}") - with st.expander( - "Correlations Between Parameters and Buying NPV", expanded=False - ): - st.write(results_df.corr().iloc[0, 1:]) - # return percentiles_df, results_df diff --git a/spaces/Epoching/DocumentQA/DiT_Extractor/dit_object_detection/ditod/beit.py b/spaces/Epoching/DocumentQA/DiT_Extractor/dit_object_detection/ditod/beit.py deleted file mode 100644 index 03d4fabdc7816f19a8810e3c443643bc9e53e6b9..0000000000000000000000000000000000000000 --- a/spaces/Epoching/DocumentQA/DiT_Extractor/dit_object_detection/ditod/beit.py +++ /dev/null @@ -1,671 +0,0 @@ -""" Vision Transformer (ViT) in PyTorch - -A PyTorch implement of Vision Transformers as described in -'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' - https://arxiv.org/abs/2010.11929 - -The official jax code is released and available at https://github.com/google-research/vision_transformer - -Status/TODO: -* Models updated to be compatible with official impl. Args added to support backward compat for old PyTorch weights. -* Weights ported from official jax impl for 384x384 base and small models, 16x16 and 32x32 patches. -* Trained (supervised on ImageNet-1k) my custom 'small' patch model to 77.9, 'base' to 79.4 top-1 with this code. -* Hopefully find time and GPUs for SSL or unsupervised pretraining on OpenImages w/ ImageNet fine-tune in future. - -Acknowledgments: -* The paper authors for releasing code and weights, thanks! 
-* I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch ... check it out -for some einops/einsum fun -* Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT -* Bert reference code checks against Huggingface Transformers and Tensorflow Bert - -Hacked together by / Copyright 2020 Ross Wightman -""" -import warnings -import math -import torch -from functools import partial -import torch.nn as nn -import torch.nn.functional as F -import torch.utils.checkpoint as checkpoint -from timm.models.layers import drop_path, to_2tuple, trunc_normal_ - - -def _cfg(url='', **kwargs): - return { - 'url': url, - 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, - 'crop_pct': .9, 'interpolation': 'bicubic', - 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), - **kwargs - } - - -class DropPath(nn.Module): - """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). - """ - - def __init__(self, drop_prob=None): - super(DropPath, self).__init__() - self.drop_prob = drop_prob - - def forward(self, x): - return drop_path(x, self.drop_prob, self.training) - - def extra_repr(self) -> str: - return 'p={}'.format(self.drop_prob) - - -class Mlp(nn.Module): - def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = nn.Linear(in_features, hidden_features) - self.act = act_layer() - self.fc2 = nn.Linear(hidden_features, out_features) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - # x = self.drop(x) - # commit this for the orignal BERT implement - x = self.fc2(x) - x = self.drop(x) - return x - - -class Attention(nn.Module): - def __init__( - self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., - proj_drop=0., window_size=None, attn_head_dim=None): - super().__init__() - self.num_heads = num_heads - head_dim = dim // num_heads - if attn_head_dim is not None: - head_dim = attn_head_dim - all_head_dim = head_dim * self.num_heads - # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights - self.scale = qk_scale or head_dim ** -0.5 - - self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False) - if qkv_bias: - self.q_bias = nn.Parameter(torch.zeros(all_head_dim)) - self.v_bias = nn.Parameter(torch.zeros(all_head_dim)) - else: - self.q_bias = None - self.v_bias = None - - if window_size: - self.window_size = window_size - self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 - self.relative_position_bias_table = nn.Parameter( - torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH - # cls to token & token 2 cls & cls to cls - - # get pair-wise relative position index for each token inside the window - coords_h = torch.arange(window_size[0]) - coords_w = torch.arange(window_size[1]) - coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww - coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww - relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww - relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 - relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0 - relative_coords[:, :, 1] += window_size[1] - 1 - relative_coords[:, :, 0] *= 2 * window_size[1] - 1 - 
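# At this point relative_coords holds, for every ordered pair of positions in the Wh x Ww
# window, a 2-D offset shifted to start at 0 and with the row component scaled by (2*Ww - 1),
# so summing the two components below yields a unique index in [0, (2*Wh-1)*(2*Ww-1)).
# The last three rows of relative_position_bias_table are reserved for cls->token, token->cls
# and cls->cls pairs, which is why num_relative_distance adds 3. For a 14 x 14 patch grid
# (224 px input, 16 px patches) the table therefore has 27*27 + 3 = 732 rows.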
relative_position_index = \ - torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype) - relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww - relative_position_index[0, 0:] = self.num_relative_distance - 3 - relative_position_index[0:, 0] = self.num_relative_distance - 2 - relative_position_index[0, 0] = self.num_relative_distance - 1 - - self.register_buffer("relative_position_index", relative_position_index) - - # trunc_normal_(self.relative_position_bias_table, std=.0) - else: - self.window_size = None - self.relative_position_bias_table = None - self.relative_position_index = None - - self.attn_drop = nn.Dropout(attn_drop) - self.proj = nn.Linear(all_head_dim, dim) - self.proj_drop = nn.Dropout(proj_drop) - - def forward(self, x, rel_pos_bias=None, training_window_size=None): - B, N, C = x.shape - qkv_bias = None - if self.q_bias is not None: - qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias)) - # qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) - qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) - qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) - q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) - - q = q * self.scale - attn = (q @ k.transpose(-2, -1)) - - if self.relative_position_bias_table is not None: - if training_window_size == self.window_size: - relative_position_bias = \ - self.relative_position_bias_table[self.relative_position_index.view(-1)].view( - self.window_size[0] * self.window_size[1] + 1, - self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH - relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww - attn = attn + relative_position_bias.unsqueeze(0) - else: - training_window_size = tuple(training_window_size.tolist()) - new_num_relative_distance = (2 * training_window_size[0] - 1) * (2 * training_window_size[1] - 1) + 3 - # new_num_relative_dis 为 所有可能的相对位置选项,包含cls-cls,tok-cls,与cls-tok - new_relative_position_bias_table = F.interpolate( - self.relative_position_bias_table[:-3, :].permute(1, 0).view(1, self.num_heads, - 2 * self.window_size[0] - 1, - 2 * self.window_size[1] - 1), - size=(2 * training_window_size[0] - 1, 2 * training_window_size[1] - 1), mode='bicubic', - align_corners=False) - new_relative_position_bias_table = new_relative_position_bias_table.view(self.num_heads, - new_num_relative_distance - 3).permute( - 1, 0) - new_relative_position_bias_table = torch.cat( - [new_relative_position_bias_table, self.relative_position_bias_table[-3::]], dim=0) - - # get pair-wise relative position index for each token inside the window - coords_h = torch.arange(training_window_size[0]) - coords_w = torch.arange(training_window_size[1]) - coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww - coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww - relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww - relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 - relative_coords[:, :, 0] += training_window_size[0] - 1 # shift to start from 0 - relative_coords[:, :, 1] += training_window_size[1] - 1 - relative_coords[:, :, 0] *= 2 * training_window_size[1] - 1 - relative_position_index = \ - torch.zeros(size=(training_window_size[0] * training_window_size[1] + 1,) * 2, - dtype=relative_coords.dtype) - 
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww - relative_position_index[0, 0:] = new_num_relative_distance - 3 - relative_position_index[0:, 0] = new_num_relative_distance - 2 - relative_position_index[0, 0] = new_num_relative_distance - 1 - - relative_position_bias = \ - new_relative_position_bias_table[relative_position_index.view(-1)].view( - training_window_size[0] * training_window_size[1] + 1, - training_window_size[0] * training_window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH - relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww - attn = attn + relative_position_bias.unsqueeze(0) - - if rel_pos_bias is not None: - attn = attn + rel_pos_bias - - attn = attn.softmax(dim=-1) - attn = self.attn_drop(attn) - - x = (attn @ v).transpose(1, 2).reshape(B, N, -1) - x = self.proj(x) - x = self.proj_drop(x) - return x - - -class Block(nn.Module): - - def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., - drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm, - window_size=None, attn_head_dim=None): - super().__init__() - self.norm1 = norm_layer(dim) - self.attn = Attention( - dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, - attn_drop=attn_drop, proj_drop=drop, window_size=window_size, attn_head_dim=attn_head_dim) - # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here - self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() - self.norm2 = norm_layer(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) - - if init_values is not None: - self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True) - self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True) - else: - self.gamma_1, self.gamma_2 = None, None - - def forward(self, x, rel_pos_bias=None, training_window_size=None): - if self.gamma_1 is None: - x = x + self.drop_path( - self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias, training_window_size=training_window_size)) - x = x + self.drop_path(self.mlp(self.norm2(x))) - else: - x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias, - training_window_size=training_window_size)) - x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) - return x - - -class PatchEmbed(nn.Module): - """ Image to Patch Embedding - """ - - def __init__(self, img_size=[224, 224], patch_size=16, in_chans=3, embed_dim=768): - super().__init__() - img_size = to_2tuple(img_size) - patch_size = to_2tuple(patch_size) - num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) - self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) - self.num_patches_w = self.patch_shape[0] - self.num_patches_h = self.patch_shape[1] - # the so-called patch_shape is the patch shape during pre-training - self.img_size = img_size - self.patch_size = patch_size - self.num_patches = num_patches - - self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) - - def forward(self, x, position_embedding=None, **kwargs): - # FIXME look at relaxing size constraints - # assert H == self.img_size[0] and W == self.img_size[1], \ - # f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." 
- x = self.proj(x) - Hp, Wp = x.shape[2], x.shape[3] - - if position_embedding is not None: - # interpolate the position embedding to the corresponding size - position_embedding = position_embedding.view(1, self.patch_shape[0], self.patch_shape[1], -1).permute(0, 3, - 1, 2) - position_embedding = F.interpolate(position_embedding, size=(Hp, Wp), mode='bicubic') - x = x + position_embedding - - x = x.flatten(2).transpose(1, 2) - return x, (Hp, Wp) - - -class HybridEmbed(nn.Module): - """ CNN Feature Map Embedding - Extract feature map from CNN, flatten, project to embedding dim. - """ - - def __init__(self, backbone, img_size=[224, 224], feature_size=None, in_chans=3, embed_dim=768): - super().__init__() - assert isinstance(backbone, nn.Module) - img_size = to_2tuple(img_size) - self.img_size = img_size - self.backbone = backbone - if feature_size is None: - with torch.no_grad(): - # FIXME this is hacky, but most reliable way of determining the exact dim of the output feature - # map for all networks, the feature metadata has reliable channel and stride info, but using - # stride to calc feature dim requires info about padding of each stage that isn't captured. - training = backbone.training - if training: - backbone.eval() - o = self.backbone(torch.zeros(1, in_chans, img_size[0], img_size[1]))[-1] - feature_size = o.shape[-2:] - feature_dim = o.shape[1] - backbone.train(training) - else: - feature_size = to_2tuple(feature_size) - feature_dim = self.backbone.feature_info.channels()[-1] - self.num_patches = feature_size[0] * feature_size[1] - self.proj = nn.Linear(feature_dim, embed_dim) - - def forward(self, x): - x = self.backbone(x)[-1] - x = x.flatten(2).transpose(1, 2) - x = self.proj(x) - return x - - -class RelativePositionBias(nn.Module): - - def __init__(self, window_size, num_heads): - super().__init__() - self.window_size = window_size - self.num_heads = num_heads - self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 - self.relative_position_bias_table = nn.Parameter( - torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH - # cls to token & token 2 cls & cls to cls - - # get pair-wise relative position index for each token inside the window - coords_h = torch.arange(window_size[0]) - coords_w = torch.arange(window_size[1]) - coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww - coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww - relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww - relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 - relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0 - relative_coords[:, :, 1] += window_size[1] - 1 - relative_coords[:, :, 0] *= 2 * window_size[1] - 1 - relative_position_index = \ - torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype) - relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww - relative_position_index[0, 0:] = self.num_relative_distance - 3 - relative_position_index[0:, 0] = self.num_relative_distance - 2 - relative_position_index[0, 0] = self.num_relative_distance - 1 - - self.register_buffer("relative_position_index", relative_position_index) - - # trunc_normal_(self.relative_position_bias_table, std=.02) - - def forward(self, training_window_size): - if training_window_size == self.window_size: - relative_position_bias = \ - self.relative_position_bias_table[self.relative_position_index.view(-1)].view( - 
self.window_size[0] * self.window_size[1] + 1, - self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH - relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww - else: - training_window_size = tuple(training_window_size.tolist()) - new_num_relative_distance = (2 * training_window_size[0] - 1) * (2 * training_window_size[1] - 1) + 3 - # new_num_relative_dis 为 所有可能的相对位置选项,包含cls-cls,tok-cls,与cls-tok - new_relative_position_bias_table = F.interpolate( - self.relative_position_bias_table[:-3, :].permute(1, 0).view(1, self.num_heads, - 2 * self.window_size[0] - 1, - 2 * self.window_size[1] - 1), - size=(2 * training_window_size[0] - 1, 2 * training_window_size[1] - 1), mode='bicubic', - align_corners=False) - new_relative_position_bias_table = new_relative_position_bias_table.view(self.num_heads, - new_num_relative_distance - 3).permute( - 1, 0) - new_relative_position_bias_table = torch.cat( - [new_relative_position_bias_table, self.relative_position_bias_table[-3::]], dim=0) - - # get pair-wise relative position index for each token inside the window - coords_h = torch.arange(training_window_size[0]) - coords_w = torch.arange(training_window_size[1]) - coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww - coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww - relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww - relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 - relative_coords[:, :, 0] += training_window_size[0] - 1 # shift to start from 0 - relative_coords[:, :, 1] += training_window_size[1] - 1 - relative_coords[:, :, 0] *= 2 * training_window_size[1] - 1 - relative_position_index = \ - torch.zeros(size=(training_window_size[0] * training_window_size[1] + 1,) * 2, - dtype=relative_coords.dtype) - relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww - relative_position_index[0, 0:] = new_num_relative_distance - 3 - relative_position_index[0:, 0] = new_num_relative_distance - 2 - relative_position_index[0, 0] = new_num_relative_distance - 1 - - relative_position_bias = \ - new_relative_position_bias_table[relative_position_index.view(-1)].view( - training_window_size[0] * training_window_size[1] + 1, - training_window_size[0] * training_window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH - relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww - - return relative_position_bias - - -class BEiT(nn.Module): - """ Vision Transformer with support for patch or hybrid CNN input stage - """ - - def __init__(self, - img_size=[224, 224], - patch_size=16, - in_chans=3, - num_classes=80, - embed_dim=768, - depth=12, - num_heads=12, - mlp_ratio=4., - qkv_bias=False, - qk_scale=None, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0., - hybrid_backbone=None, - norm_layer=None, - init_values=None, - use_abs_pos_emb=False, - use_rel_pos_bias=False, - use_shared_rel_pos_bias=False, - use_checkpoint=True, - pretrained=None, - out_features=None, - ): - - super(BEiT, self).__init__() - - norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) - self.num_classes = num_classes - self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models - self.use_checkpoint = use_checkpoint - - if hybrid_backbone is not None: - self.patch_embed = HybridEmbed( - hybrid_backbone, img_size=img_size, in_chans=in_chans, embed_dim=embed_dim) - else: - self.patch_embed = PatchEmbed( - 
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) - num_patches = self.patch_embed.num_patches - self.out_features = out_features - self.out_indices = [int(name[5:]) for name in out_features] - - self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) - # self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) - if use_abs_pos_emb: - self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) - else: - self.pos_embed = None - self.pos_drop = nn.Dropout(p=drop_rate) - - self.use_shared_rel_pos_bias = use_shared_rel_pos_bias - if use_shared_rel_pos_bias: - self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads) - else: - self.rel_pos_bias = None - - dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule - self.use_rel_pos_bias = use_rel_pos_bias - self.blocks = nn.ModuleList([ - Block( - dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, - drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, - init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None) - for i in range(depth)]) - - # trunc_normal_(self.mask_token, std=.02) - - if patch_size == 16: - self.fpn1 = nn.Sequential( - nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), - # nn.SyncBatchNorm(embed_dim), - nn.BatchNorm2d(embed_dim), - nn.GELU(), - nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), - ) - - self.fpn2 = nn.Sequential( - nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), - ) - - self.fpn3 = nn.Identity() - - self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2) - elif patch_size == 8: - self.fpn1 = nn.Sequential( - nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), - ) - - self.fpn2 = nn.Identity() - - self.fpn3 = nn.Sequential( - nn.MaxPool2d(kernel_size=2, stride=2), - ) - - self.fpn4 = nn.Sequential( - nn.MaxPool2d(kernel_size=4, stride=4), - ) - - if self.pos_embed is not None: - trunc_normal_(self.pos_embed, std=.02) - trunc_normal_(self.cls_token, std=.02) - self.apply(self._init_weights) - self.fix_init_weight() - - def fix_init_weight(self): - def rescale(param, layer_id): - param.div_(math.sqrt(2.0 * layer_id)) - - for layer_id, layer in enumerate(self.blocks): - rescale(layer.attn.proj.weight.data, layer_id + 1) - rescale(layer.mlp.fc2.weight.data, layer_id + 1) - - def _init_weights(self, m): - if isinstance(m, nn.Linear): - trunc_normal_(m.weight, std=.02) - if isinstance(m, nn.Linear) and m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.LayerNorm): - nn.init.constant_(m.bias, 0) - nn.init.constant_(m.weight, 1.0) - - ''' - def init_weights(self): - """Initialize the weights in backbone. - - Args: - pretrained (str, optional): Path to pre-trained weights. - Defaults to None. 
- """ - logger = get_root_logger() - - if self.pos_embed is not None: - trunc_normal_(self.pos_embed, std=.02) - trunc_normal_(self.cls_token, std=.02) - self.apply(self._init_weights) - self.fix_init_weight() - - if self.init_cfg is None: - logger.warn(f'No pre-trained weights for ' - f'{self.__class__.__name__}, ' - f'training start from scratch') - else: - assert 'checkpoint' in self.init_cfg, f'Only support ' \ - f'specify `Pretrained` in ' \ - f'`init_cfg` in ' \ - f'{self.__class__.__name__} ' - logger.info(f"Will load ckpt from {self.init_cfg['checkpoint']}") - load_checkpoint(self, - filename=self.init_cfg['checkpoint'], - strict=False, - logger=logger, - beit_spec_expand_rel_pos = self.use_rel_pos_bias, - ) - ''' - - def get_num_layers(self): - return len(self.blocks) - - @torch.jit.ignore - def no_weight_decay(self): - return {'pos_embed', 'cls_token'} - - def forward_features(self, x): - B, C, H, W = x.shape - x, (Hp, Wp) = self.patch_embed(x, self.pos_embed[:, 1:, :] if self.pos_embed is not None else None) - # Hp, Wp are HW for patches - batch_size, seq_len, _ = x.size() - - cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks - if self.pos_embed is not None: - cls_tokens = cls_tokens + self.pos_embed[:, :1, :] - x = torch.cat((cls_tokens, x), dim=1) - x = self.pos_drop(x) - - features = [] - training_window_size = torch.tensor([Hp, Wp]) - - rel_pos_bias = self.rel_pos_bias(training_window_size) if self.rel_pos_bias is not None else None - - for i, blk in enumerate(self.blocks): - if self.use_checkpoint: - x = checkpoint.checkpoint(blk, x, rel_pos_bias, training_window_size) - else: - x = blk(x, rel_pos_bias=rel_pos_bias, training_window_size=training_window_size) - if i in self.out_indices: - xp = x[:, 1:, :].permute(0, 2, 1).reshape(B, -1, Hp, Wp) - features.append(xp.contiguous()) - - ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4] - for i in range(len(features)): - features[i] = ops[i](features[i]) - - feat_out = {} - - for name, value in zip(self.out_features, features): - feat_out[name] = value - - return feat_out - - def forward(self, x): - x = self.forward_features(x) - return x - - -def beit_base_patch16(pretrained=False, **kwargs): - model = BEiT( - patch_size=16, - embed_dim=768, - depth=12, - num_heads=12, - mlp_ratio=4, - qkv_bias=True, - norm_layer=partial(nn.LayerNorm, eps=1e-6), - init_values=None, - **kwargs) - model.default_cfg = _cfg() - return model - -def beit_large_patch16(pretrained=False, **kwargs): - model = BEiT( - patch_size=16, - embed_dim=1024, - depth=24, - num_heads=16, - mlp_ratio=4, - qkv_bias=True, - norm_layer=partial(nn.LayerNorm, eps=1e-6), - init_values=None, - **kwargs) - model.default_cfg = _cfg() - return model - -def dit_base_patch16(pretrained=False, **kwargs): - model = BEiT( - patch_size=16, - embed_dim=768, - depth=12, - num_heads=12, - mlp_ratio=4, - qkv_bias=True, - norm_layer=partial(nn.LayerNorm, eps=1e-6), - init_values=0.1, - **kwargs) - model.default_cfg = _cfg() - return model - -def dit_large_patch16(pretrained=False, **kwargs): - model = BEiT( - patch_size=16, - embed_dim=1024, - depth=24, - num_heads=16, - mlp_ratio=4, - qkv_bias=True, - norm_layer=partial(nn.LayerNorm, eps=1e-6), - init_values=1e-5, - **kwargs) - model.default_cfg = _cfg() - return model - -if __name__ == '__main__': - model = BEiT(use_checkpoint=True, use_shared_rel_pos_bias=True) - model = model.to("cuda:0") - input1 = torch.rand(2, 3, 512, 762).to("cuda:0") - input2 = torch.rand(2, 3, 800, 
1200).to("cuda:0") - input3 = torch.rand(2, 3, 720, 1000).to("cuda:0") - output1 = model(input1) - output2 = model(input2) - output3 = model(input3) - print("all done") diff --git a/spaces/EronSamez/RVC_HFmeu/demucs/__main__.py b/spaces/EronSamez/RVC_HFmeu/demucs/__main__.py deleted file mode 100644 index 5148f20623bdaa827777558844796ded1876d7d0..0000000000000000000000000000000000000000 --- a/spaces/EronSamez/RVC_HFmeu/demucs/__main__.py +++ /dev/null @@ -1,317 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import json -import math -import os -import sys -import time -from dataclasses import dataclass, field - -import torch as th -from torch import distributed, nn -from torch.nn.parallel.distributed import DistributedDataParallel - -from .augment import FlipChannels, FlipSign, Remix, Scale, Shift -from .compressed import get_compressed_datasets -from .model import Demucs -from .parser import get_name, get_parser -from .raw import Rawset -from .repitch import RepitchedWrapper -from .pretrained import load_pretrained, SOURCES -from .tasnet import ConvTasNet -from .test import evaluate -from .train import train_model, validate_model -from .utils import (human_seconds, load_model, save_model, get_state, - save_state, sizeof_fmt, get_quantizer) -from .wav import get_wav_datasets, get_musdb_wav_datasets - - -@dataclass -class SavedState: - metrics: list = field(default_factory=list) - last_state: dict = None - best_state: dict = None - optimizer: dict = None - - -def main(): - parser = get_parser() - args = parser.parse_args() - name = get_name(parser, args) - print(f"Experiment {name}") - - if args.musdb is None and args.rank == 0: - print( - "You must provide the path to the MusDB dataset with the --musdb flag. " - "To download the MusDB dataset, see https://sigsep.github.io/datasets/musdb.html.", - file=sys.stderr) - sys.exit(1) - - eval_folder = args.evals / name - eval_folder.mkdir(exist_ok=True, parents=True) - args.logs.mkdir(exist_ok=True) - metrics_path = args.logs / f"{name}.json" - eval_folder.mkdir(exist_ok=True, parents=True) - args.checkpoints.mkdir(exist_ok=True, parents=True) - args.models.mkdir(exist_ok=True, parents=True) - - if args.device is None: - device = "cpu" - if th.cuda.is_available(): - device = "cuda" - else: - device = args.device - - th.manual_seed(args.seed) - # Prevents too many threads to be started when running `museval` as it can be quite - # inefficient on NUMA architectures. 
- os.environ["OMP_NUM_THREADS"] = "1" - os.environ["MKL_NUM_THREADS"] = "1" - - if args.world_size > 1: - if device != "cuda" and args.rank == 0: - print("Error: distributed training is only available with cuda device", file=sys.stderr) - sys.exit(1) - th.cuda.set_device(args.rank % th.cuda.device_count()) - distributed.init_process_group(backend="nccl", - init_method="tcp://" + args.master, - rank=args.rank, - world_size=args.world_size) - - checkpoint = args.checkpoints / f"{name}.th" - checkpoint_tmp = args.checkpoints / f"{name}.th.tmp" - if args.restart and checkpoint.exists() and args.rank == 0: - checkpoint.unlink() - - if args.test or args.test_pretrained: - args.epochs = 1 - args.repeat = 0 - if args.test: - model = load_model(args.models / args.test) - else: - model = load_pretrained(args.test_pretrained) - elif args.tasnet: - model = ConvTasNet(audio_channels=args.audio_channels, - samplerate=args.samplerate, X=args.X, - segment_length=4 * args.samples, - sources=SOURCES) - else: - model = Demucs( - audio_channels=args.audio_channels, - channels=args.channels, - context=args.context, - depth=args.depth, - glu=args.glu, - growth=args.growth, - kernel_size=args.kernel_size, - lstm_layers=args.lstm_layers, - rescale=args.rescale, - rewrite=args.rewrite, - stride=args.conv_stride, - resample=args.resample, - normalize=args.normalize, - samplerate=args.samplerate, - segment_length=4 * args.samples, - sources=SOURCES, - ) - model.to(device) - if args.init: - model.load_state_dict(load_pretrained(args.init).state_dict()) - - if args.show: - print(model) - size = sizeof_fmt(4 * sum(p.numel() for p in model.parameters())) - print(f"Model size {size}") - return - - try: - saved = th.load(checkpoint, map_location='cpu') - except IOError: - saved = SavedState() - - optimizer = th.optim.Adam(model.parameters(), lr=args.lr) - - quantizer = None - quantizer = get_quantizer(model, args, optimizer) - - if saved.last_state is not None: - model.load_state_dict(saved.last_state, strict=False) - if saved.optimizer is not None: - optimizer.load_state_dict(saved.optimizer) - - model_name = f"{name}.th" - if args.save_model: - if args.rank == 0: - model.to("cpu") - model.load_state_dict(saved.best_state) - save_model(model, quantizer, args, args.models / model_name) - return - elif args.save_state: - model_name = f"{args.save_state}.th" - if args.rank == 0: - model.to("cpu") - model.load_state_dict(saved.best_state) - state = get_state(model, quantizer) - save_state(state, args.models / model_name) - return - - if args.rank == 0: - done = args.logs / f"{name}.done" - if done.exists(): - done.unlink() - - augment = [Shift(args.data_stride)] - if args.augment: - augment += [FlipSign(), FlipChannels(), Scale(), - Remix(group_size=args.remix_group_size)] - augment = nn.Sequential(*augment).to(device) - print("Agumentation pipeline:", augment) - - if args.mse: - criterion = nn.MSELoss() - else: - criterion = nn.L1Loss() - - # Setting number of samples so that all convolution windows are full. - # Prevents hard to debug mistake with the prediction being shifted compared - # to the input mixture. - samples = model.valid_length(args.samples) - print(f"Number of training samples adjusted to {samples}") - samples = samples + args.data_stride - if args.repitch: - # We need a bit more audio samples, to account for potential - # tempo change. 
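# For example, with args.max_tempo = 12 (up to a 12% tempo change from repitching) the divisor
# below is 1 - 0.12 = 0.88, so roughly 13.6% more samples are requested, which leaves enough
# audio to crop back to the original segment length after the stretch.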
- samples = math.ceil(samples / (1 - 0.01 * args.max_tempo)) - - args.metadata.mkdir(exist_ok=True, parents=True) - if args.raw: - train_set = Rawset(args.raw / "train", - samples=samples, - channels=args.audio_channels, - streams=range(1, len(model.sources) + 1), - stride=args.data_stride) - - valid_set = Rawset(args.raw / "valid", channels=args.audio_channels) - elif args.wav: - train_set, valid_set = get_wav_datasets(args, samples, model.sources) - elif args.is_wav: - train_set, valid_set = get_musdb_wav_datasets(args, samples, model.sources) - else: - train_set, valid_set = get_compressed_datasets(args, samples) - - if args.repitch: - train_set = RepitchedWrapper( - train_set, - proba=args.repitch, - max_tempo=args.max_tempo) - - best_loss = float("inf") - for epoch, metrics in enumerate(saved.metrics): - print(f"Epoch {epoch:03d}: " - f"train={metrics['train']:.8f} " - f"valid={metrics['valid']:.8f} " - f"best={metrics['best']:.4f} " - f"ms={metrics.get('true_model_size', 0):.2f}MB " - f"cms={metrics.get('compressed_model_size', 0):.2f}MB " - f"duration={human_seconds(metrics['duration'])}") - best_loss = metrics['best'] - - if args.world_size > 1: - dmodel = DistributedDataParallel(model, - device_ids=[th.cuda.current_device()], - output_device=th.cuda.current_device()) - else: - dmodel = model - - for epoch in range(len(saved.metrics), args.epochs): - begin = time.time() - model.train() - train_loss, model_size = train_model( - epoch, train_set, dmodel, criterion, optimizer, augment, - quantizer=quantizer, - batch_size=args.batch_size, - device=device, - repeat=args.repeat, - seed=args.seed, - diffq=args.diffq, - workers=args.workers, - world_size=args.world_size) - model.eval() - valid_loss = validate_model( - epoch, valid_set, model, criterion, - device=device, - rank=args.rank, - split=args.split_valid, - overlap=args.overlap, - world_size=args.world_size) - - ms = 0 - cms = 0 - if quantizer and args.rank == 0: - ms = quantizer.true_model_size() - cms = quantizer.compressed_model_size(num_workers=min(40, args.world_size * 10)) - - duration = time.time() - begin - if valid_loss < best_loss and ms <= args.ms_target: - best_loss = valid_loss - saved.best_state = { - key: value.to("cpu").clone() - for key, value in model.state_dict().items() - } - - saved.metrics.append({ - "train": train_loss, - "valid": valid_loss, - "best": best_loss, - "duration": duration, - "model_size": model_size, - "true_model_size": ms, - "compressed_model_size": cms, - }) - if args.rank == 0: - json.dump(saved.metrics, open(metrics_path, "w")) - - saved.last_state = model.state_dict() - saved.optimizer = optimizer.state_dict() - if args.rank == 0 and not args.test: - th.save(saved, checkpoint_tmp) - checkpoint_tmp.rename(checkpoint) - - print(f"Epoch {epoch:03d}: " - f"train={train_loss:.8f} valid={valid_loss:.8f} best={best_loss:.4f} ms={ms:.2f}MB " - f"cms={cms:.2f}MB " - f"duration={human_seconds(duration)}") - - if args.world_size > 1: - distributed.barrier() - - del dmodel - model.load_state_dict(saved.best_state) - if args.eval_cpu: - device = "cpu" - model.to(device) - model.eval() - evaluate(model, args.musdb, eval_folder, - is_wav=args.is_wav, - rank=args.rank, - world_size=args.world_size, - device=device, - save=args.save, - split=args.split_valid, - shifts=args.shifts, - overlap=args.overlap, - workers=args.eval_workers) - model.to("cpu") - if args.rank == 0: - if not (args.test or args.test_pretrained): - save_model(model, quantizer, args, args.models / model_name) - print("done") - 
done.write_text("done") - - -if __name__ == "__main__": - main() diff --git a/spaces/EuroPython2022/pyro-vision/app.py b/spaces/EuroPython2022/pyro-vision/app.py deleted file mode 100644 index a3c656848cd7f1f10b72ddc80a0ea408ffe17c81..0000000000000000000000000000000000000000 --- a/spaces/EuroPython2022/pyro-vision/app.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright (C) 2022, Pyronear. - -# This program is licensed under the Apache License 2.0. -# See LICENSE or go to for full license details. - -import argparse -import json - -import gradio as gr -import numpy as np -import onnxruntime -from huggingface_hub import hf_hub_download -from PIL import Image - -REPO = "pyronear/rexnet1_0x" - - -# Download model config & checkpoint -with open(hf_hub_download(REPO, filename="config.json"), "rb") as f: - cfg = json.load(f) - -ort_session = onnxruntime.InferenceSession(hf_hub_download(REPO, filename="model.onnx")) - -def preprocess_image(pil_img: Image.Image) -> np.ndarray: - """Preprocess an image for inference - - Args: - pil_img: a valid pillow image - - Returns: - the resized and normalized image of shape (1, C, H, W) - """ - - # Resizing (PIL takes (W, H) order for resizing) - img = pil_img.resize(cfg["input_shape"][-2:][::-1], Image.BILINEAR) - # (H, W, C) --> (C, H, W) - img = np.asarray(img).transpose((2, 0, 1)).astype(np.float32) / 255 - # Normalization - img -= np.array(cfg["mean"])[:, None, None] - img /= np.array(cfg["std"])[:, None, None] - - return img[None, ...] - -def predict(image): - # Preprocessing - np_img = preprocess_image(image) - ort_input = {ort_session.get_inputs()[0].name: np_img} - - # Inference - ort_out = ort_session.run(None, ort_input) - # Post-processing - probs = 1 / (1 + np.exp(-ort_out[0][0])) - - return {class_name: float(conf) for class_name, conf in zip(cfg["classes"], probs)} - - -img = gr.inputs.Image(type="pil") -outputs = gr.outputs.Label(num_top_classes=1) - - -gr.Interface( - fn=predict, - inputs=[img], - outputs=outputs, - title="PyroVision: image classification demo", - article=( - "

    " - "Github Repo | " - "Documentation

    " - ), - live=True, -).launch() diff --git a/spaces/FSDL-Fashion/fashion_img_search/README.md b/spaces/FSDL-Fashion/fashion_img_search/README.md deleted file mode 100644 index 2489ef59995383c3f55e6c62244ed34817ece31c..0000000000000000000000000000000000000000 --- a/spaces/FSDL-Fashion/fashion_img_search/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Fashion Img Search -emoji: 🐢 -colorFrom: indigo -colorTo: yellow -sdk: gradio -sdk_version: 3.4.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/FaceOnLive/Face-Recognition-SDK/facewrapper/facewrapper.py b/spaces/FaceOnLive/Face-Recognition-SDK/facewrapper/facewrapper.py deleted file mode 100644 index 1601c4e2af93690f7b1b9b6e294caf9869a3e6d1..0000000000000000000000000000000000000000 --- a/spaces/FaceOnLive/Face-Recognition-SDK/facewrapper/facewrapper.py +++ /dev/null @@ -1,32 +0,0 @@ -import ctypes, ctypes.util -from ctypes import * -from numpy.ctypeslib import ndpointer -import sys -import os - -lib_path = os.path.abspath(os.path.dirname(__file__)) + '/libs/libttvfaceengine6.so' -liveness_engine = cdll.LoadLibrary(lib_path) - -ttv_version = liveness_engine.ttv_version -ttv_version.argtypes = [] -ttv_version.restype = ctypes.c_char_p - -ttv_get_hwid = liveness_engine.ttv_get_hwid -ttv_get_hwid.argtypes = [] -ttv_get_hwid.restype = ctypes.c_char_p - -ttv_init = liveness_engine.ttv_init -ttv_init.argtypes = [ctypes.c_char_p, ctypes.c_char_p] -ttv_init.restype = ctypes.c_int32 - -ttv_init_offline = liveness_engine.ttv_init_offline -ttv_init_offline.argtypes = [ctypes.c_char_p, ctypes.c_char_p] -ttv_init_offline.restype = ctypes.c_int32 - -ttv_extract_feature = liveness_engine.ttv_extract_feature -ttv_extract_feature.argtypes = [ndpointer(ctypes.c_ubyte, flags='C_CONTIGUOUS'), ctypes.c_int32, ctypes.c_int32, ndpointer(ctypes.c_int32, flags='C_CONTIGUOUS'), ndpointer(ctypes.c_ubyte, flags='C_CONTIGUOUS'), ndpointer(ctypes.c_int32, flags='C_CONTIGUOUS')] -ttv_extract_feature.restype = ctypes.c_int - -ttv_compare_feature = liveness_engine.ttv_compare_feature -ttv_compare_feature.argtypes = [ndpointer(ctypes.c_ubyte, flags='C_CONTIGUOUS'), ndpointer(ctypes.c_ubyte, flags='C_CONTIGUOUS')] -ttv_compare_feature.restype = ctypes.c_double diff --git a/spaces/FlippFuzz/whisper-webui/app-local.py b/spaces/FlippFuzz/whisper-webui/app-local.py deleted file mode 100644 index c7717d096ca5f95177f0dba03cd62ca729bae9f3..0000000000000000000000000000000000000000 --- a/spaces/FlippFuzz/whisper-webui/app-local.py +++ /dev/null @@ -1,5 +0,0 @@ -# Run the app with no audio file restrictions -from app import create_ui -from src.config import ApplicationConfig - -create_ui(ApplicationConfig.create_default(input_audio_max_duration=-1)) \ No newline at end of file diff --git a/spaces/FridaZuley/RVC_HFKawaii/infer/lib/infer_pack/onnx_inference.py b/spaces/FridaZuley/RVC_HFKawaii/infer/lib/infer_pack/onnx_inference.py deleted file mode 100644 index 6633659fc83b19d82611d3c9cc840e9c547734d0..0000000000000000000000000000000000000000 --- a/spaces/FridaZuley/RVC_HFKawaii/infer/lib/infer_pack/onnx_inference.py +++ /dev/null @@ -1,149 +0,0 @@ -import librosa -import numpy as np -import onnxruntime -import soundfile - -import logging - -logger = logging.getLogger(__name__) - - -class ContentVec: - def __init__(self, vec_path="pretrained/vec-768-layer-12.onnx", device=None): - logger.info("Load model(s) from {}".format(vec_path)) - if device == "cpu" or 
device is None: - providers = ["CPUExecutionProvider"] - elif device == "cuda": - providers = ["CUDAExecutionProvider", "CPUExecutionProvider"] - elif device == "dml": - providers = ["DmlExecutionProvider"] - else: - raise RuntimeError("Unsportted Device") - self.model = onnxruntime.InferenceSession(vec_path, providers=providers) - - def __call__(self, wav): - return self.forward(wav) - - def forward(self, wav): - feats = wav - if feats.ndim == 2: # double channels - feats = feats.mean(-1) - assert feats.ndim == 1, feats.ndim - feats = np.expand_dims(np.expand_dims(feats, 0), 0) - onnx_input = {self.model.get_inputs()[0].name: feats} - logits = self.model.run(None, onnx_input)[0] - return logits.transpose(0, 2, 1) - - -def get_f0_predictor(f0_predictor, hop_length, sampling_rate, **kargs): - if f0_predictor == "pm": - from lib.infer_pack.modules.F0Predictor.PMF0Predictor import PMF0Predictor - - f0_predictor_object = PMF0Predictor( - hop_length=hop_length, sampling_rate=sampling_rate - ) - elif f0_predictor == "harvest": - from lib.infer_pack.modules.F0Predictor.HarvestF0Predictor import ( - HarvestF0Predictor, - ) - - f0_predictor_object = HarvestF0Predictor( - hop_length=hop_length, sampling_rate=sampling_rate - ) - elif f0_predictor == "dio": - from lib.infer_pack.modules.F0Predictor.DioF0Predictor import DioF0Predictor - - f0_predictor_object = DioF0Predictor( - hop_length=hop_length, sampling_rate=sampling_rate - ) - else: - raise Exception("Unknown f0 predictor") - return f0_predictor_object - - -class OnnxRVC: - def __init__( - self, - model_path, - sr=40000, - hop_size=512, - vec_path="vec-768-layer-12", - device="cpu", - ): - vec_path = f"pretrained/{vec_path}.onnx" - self.vec_model = ContentVec(vec_path, device) - if device == "cpu" or device is None: - providers = ["CPUExecutionProvider"] - elif device == "cuda": - providers = ["CUDAExecutionProvider", "CPUExecutionProvider"] - elif device == "dml": - providers = ["DmlExecutionProvider"] - else: - raise RuntimeError("Unsportted Device") - self.model = onnxruntime.InferenceSession(model_path, providers=providers) - self.sampling_rate = sr - self.hop_size = hop_size - - def forward(self, hubert, hubert_length, pitch, pitchf, ds, rnd): - onnx_input = { - self.model.get_inputs()[0].name: hubert, - self.model.get_inputs()[1].name: hubert_length, - self.model.get_inputs()[2].name: pitch, - self.model.get_inputs()[3].name: pitchf, - self.model.get_inputs()[4].name: ds, - self.model.get_inputs()[5].name: rnd, - } - return (self.model.run(None, onnx_input)[0] * 32767).astype(np.int16) - - def inference( - self, - raw_path, - sid, - f0_method="dio", - f0_up_key=0, - pad_time=0.5, - cr_threshold=0.02, - ): - f0_min = 50 - f0_max = 1100 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - f0_predictor = get_f0_predictor( - f0_method, - hop_length=self.hop_size, - sampling_rate=self.sampling_rate, - threshold=cr_threshold, - ) - wav, sr = librosa.load(raw_path, sr=self.sampling_rate) - org_length = len(wav) - if org_length / sr > 50.0: - raise RuntimeError("Reached Max Length") - - wav16k = librosa.resample(wav, orig_sr=self.sampling_rate, target_sr=16000) - wav16k = wav16k - - hubert = self.vec_model(wav16k) - hubert = np.repeat(hubert, 2, axis=2).transpose(0, 2, 1).astype(np.float32) - hubert_length = hubert.shape[1] - - pitchf = f0_predictor.compute_f0(wav, hubert_length) - pitchf = pitchf * 2 ** (f0_up_key / 12) - pitch = pitchf.copy() - f0_mel = 1127 * np.log(1 + pitch / 700) - f0_mel[f0_mel > 
0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( - f0_mel_max - f0_mel_min - ) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - pitch = np.rint(f0_mel).astype(np.int64) - - pitchf = pitchf.reshape(1, len(pitchf)).astype(np.float32) - pitch = pitch.reshape(1, len(pitch)) - ds = np.array([sid]).astype(np.int64) - - rnd = np.random.randn(1, 192, hubert_length).astype(np.float32) - hubert_length = np.array([hubert_length]).astype(np.int64) - - out_wav = self.forward(hubert, hubert_length, pitch, pitchf, ds, rnd).squeeze() - out_wav = np.pad(out_wav, (0, 2 * self.hop_size), "constant") - return out_wav[0:org_length] diff --git a/spaces/GIZ/SDSN-demo/ver0.1 scripts/coherence.py b/spaces/GIZ/SDSN-demo/ver0.1 scripts/coherence.py deleted file mode 100644 index a7ba72c0e97df9134cdbf51c9d877fc03680e8b4..0000000000000000000000000000000000000000 --- a/spaces/GIZ/SDSN-demo/ver0.1 scripts/coherence.py +++ /dev/null @@ -1,267 +0,0 @@ -# set path -import glob, os, sys; sys.path.append('../udfPreprocess') - -#import helper -import udfPreprocess.docPreprocessing as pre -import udfPreprocess.cleaning as clean - -#import needed libraries -import seaborn as sns -from pandas import DataFrame -from sentence_transformers import SentenceTransformer, CrossEncoder, util -from sklearn.metrics.pairwise import cosine_similarity -# from keybert import KeyBERT -from transformers import pipeline -import matplotlib.pyplot as plt -import numpy as np -import streamlit as st -import pandas as pd -from rank_bm25 import BM25Okapi -from sklearn.feature_extraction import _stop_words -import string -from tqdm.autonotebook import tqdm -import numpy as np -import urllib.request -import ast -import tempfile -import sqlite3 -import json -import urllib.request -import ast -import docx -from docx.shared import Inches -from docx.shared import Pt -from docx.enum.style import WD_STYLE_TYPE - -def app(): - # Sidebar - st.sidebar.title('Check Coherence') - st.sidebar.write(' ') - with open('ndcs/countryList.txt') as dfile: - countryList = dfile.read() - - countryList = ast.literal_eval(countryList) - countrynames = list(countryList.keys()) - - option = st.sidebar.selectbox('Select Country', (countrynames)) - countryCode = countryList[option] - - - with st.container(): - st.markdown("

    Check Coherence of Policy Document with NDCs

    ", unsafe_allow_html=True) - st.write(' ') - st.write(' ') - - with st.expander("ℹ️ - About this app", expanded=True): - - st.write( - """ - The *Check Coherence* app is an easy-to-use interface built in Streamlit for doing analysis of policy document and finding the coherence between NDCs/New-Updated NDCs- developed by GIZ Data and the Sustainable Development Solution Network. - """ - ) - - st.markdown("") - - st.markdown("") - st.markdown("## 📌 Step One: Upload document of the country selected ") - - with st.container(): - docs = None - # asking user for either upload or select existing doc - choice = st.radio(label = 'Select the Document', - help = 'You can upload the document \ - or else you can try a example document.', - options = ('Upload Document', 'Try Example'), - horizontal = True) - - if choice == 'Upload Document': - uploaded_file = st.file_uploader('Upload the File', type=['pdf', 'docx', 'txt']) - if uploaded_file is not None: - with tempfile.NamedTemporaryFile(mode="wb") as temp: - bytes_data = uploaded_file.getvalue() - temp.write(bytes_data) - - st.write("Uploaded Filename: ", uploaded_file.name) - file_name = uploaded_file.name - file_path = temp.name - docs = pre.load_document(file_path, file_name) - haystackDoc, dataframeDoc, textData, paraList = clean.preprocessing(docs) - - else: - # listing the options - option = st.selectbox('Select the example document', - ('South Africa:Low Emission strategy', - 'Ethiopia: 10 Year Development Plan')) - if option is 'South Africa:Low Emission strategy': - file_name = file_path = 'sample/South Africa_s Low Emission Development Strategy.txt' - countryCode = countryList['South Africa'] - st.write("Selected document:", file_name.split('/')[1]) - # with open('sample/South Africa_s Low Emission Development Strategy.txt') as dfile: - # file = open('sample/South Africa_s Low Emission Development Strategy.txt', 'wb') - else: - # with open('sample/Ethiopia_s_2021_10 Year Development Plan.txt') as dfile: - file_name = file_path = 'sample/Ethiopia_s_2021_10 Year Development Plan.txt' - countryCode = countryList['Ethiopia'] - st.write("Selected document:", file_name.split('/')[1]) - - if option is not None: - docs = pre.load_document(file_path,file_name) - haystackDoc, dataframeDoc, textData, paraList = clean.preprocessing(docs) - - with open('ndcs/cca.txt', encoding='utf-8', errors='ignore') as dfile: - cca_sent = dfile.read() - - cca_sent = ast.literal_eval(cca_sent) - - with open('ndcs/ccm.txt', encoding='utf-8', errors='ignore') as dfile: - ccm_sent = dfile.read() - - ccm_sent = ast.literal_eval(ccm_sent) - - with open('ndcs/countryList.txt') as dfile: - countryList = dfile.read() - - countryList = ast.literal_eval(countryList) - - def get_document(countryCode: str): - link = "https://klimalog.die-gdi.de/ndc/open-data/dataset.json" - with urllib.request.urlopen(link) as urlfile: - data = json.loads(urlfile.read()) - categoriesData = {} - categoriesData['categories']= data['categories'] - categoriesData['subcategories']= data['subcategories'] - keys_sub = categoriesData['subcategories'].keys() - documentType= 'NDCs' - if documentType in data.keys(): - if countryCode in data[documentType].keys(): - get_dict = {} - for key, value in data[documentType][countryCode].items(): - if key not in ['country_name','region_id', 'region_name']: - get_dict[key] = value['classification'] - else: - get_dict[key] = value - else: - return None - else: - return None - - country = {} - for key in categoriesData['categories']: - country[key]= {} - for 
key,value in categoriesData['subcategories'].items(): - country[value['category']][key] = get_dict[key] - - return country - - # country_ndc = get_document('NDCs', countryList[option]) - - def countrySpecificCCA(cca_sent, threshold, countryCode): - temp = {} - doc = get_document(countryCode) - for key,value in cca_sent.items(): - id_ = doc['climate change adaptation'][key]['id'] - if id_ >threshold: - temp[key] = value['id'][id_] - return temp - - - def countrySpecificCCM(ccm_sent, threshold, countryCode): - temp = {} - doc = get_document(countryCode) - for key,value in ccm_sent.items(): - id_ = doc['climate change mitigation'][key]['id'] - if id_ >threshold: - temp[key] = value['id'][id_] - - return temp - - - - if docs is not None: - sent_cca = countrySpecificCCA(cca_sent,1,countryCode) - sent_ccm = countrySpecificCCM(ccm_sent,1,countryCode) - #st.write(sent_ccm) - @st.cache(allow_output_mutation=True) - def load_sentenceTransformer(name): - return SentenceTransformer(name) - model = load_sentenceTransformer('all-MiniLM-L6-v2') - - document_embeddings = model.encode(paraList, show_progress_bar=True) - - genre = st.radio( "Select Category",('Climate Change Adaptation', 'Climate Change Mitigation')) - if genre == 'Climate Change Adaptation': - sent_dict = sent_cca - sent_labels = [] - for key,sent in sent_dict.items(): - sent_labels.append(sent) - label_embeddings = model.encode(sent_labels, show_progress_bar=True) - similarity_high_threshold = 0.55 - similarity_matrix = cosine_similarity(label_embeddings, document_embeddings) - label_indices, paragraph_indices = np.where(similarity_matrix>similarity_high_threshold) - - positive_indices = list(zip(label_indices.tolist(), paragraph_indices.tolist())) - - - else: - sent_dict = sent_ccm - sent_labels = [] - for key,sent in sent_dict.items(): - sent_labels.append(sent) - label_embeddings = model.encode(sent_labels, show_progress_bar=True) - similarity_high_threshold = 0.55 - similarity_matrix = cosine_similarity(label_embeddings, document_embeddings) - label_indices, paragraph_indices = np.where(similarity_matrix>similarity_high_threshold) - - positive_indices = list(zip(label_indices.tolist(), paragraph_indices.tolist())) - - - # sent_labels = [] - # for key,sent in sent_dict.items(): - # sent_labels.append(sent) - - - # label_embeddings = model.encode(sent_labels, show_progress_bar=True) - - #similarity_high_threshold = 0.55 - # similarity_matrix = cosine_similarity(label_embeddings, document_embeddings) - #label_indices, paragraph_indices = np.where(similarity_matrix>similarity_high_threshold) - - #positive_indices = list(zip(label_indices.tolist(), paragraph_indices.tolist())) - document = docx.Document() - document.add_heading('Document name:{}'.format(file_name), 2) - section = document.sections[0] - - # Calling the footer - footer = section.footer - - # Calling the paragraph already present in - # the footer section - footer_para = footer.paragraphs[0] - - font_styles = document.styles - font_charstyle = font_styles.add_style('CommentsStyle', WD_STYLE_TYPE.CHARACTER) - font_object = font_charstyle.font - font_object.size = Pt(7) - # Adding the centered zoned footer - footer_para.add_run('''\tPowered by GIZ Data and the Sustainable Development Solution Network hosted at Hugging-Face spaces: https://huggingface.co/spaces/ppsingh/streamlit_dev''', style='CommentsStyle') - - document.add_paragraph("Country Code for which NDC is carried out {}".format(countryCode)) - - for _label_idx, _paragraph_idx in positive_indices: - st.write("This 
paragraph: \n") - document.add_paragraph("This paragraph: \n") - st.write(paraList[_paragraph_idx]) - st.write(f"Is relevant to: \n {list(sent_dict.keys())[_label_idx]}") - document.add_paragraph(f"Is relevant to: \n {list(sent_dict.keys())[_label_idx]}") - st.write('-'*10) - document.add_paragraph('-'*10) - - document.save('demo.docx') - with open("demo.docx", "rb") as file: - btn = st.download_button( - label="Download file", - data=file, - file_name="demo.docx", - mime="txt/docx" - ) - diff --git a/spaces/GXSA/bingo/src/components/ui/sheet.tsx b/spaces/GXSA/bingo/src/components/ui/sheet.tsx deleted file mode 100644 index c9f5ce0f81a91067bb013e988a07eb1e6bf6953b..0000000000000000000000000000000000000000 --- a/spaces/GXSA/bingo/src/components/ui/sheet.tsx +++ /dev/null @@ -1,122 +0,0 @@ -'use client' - -import * as React from 'react' -import * as SheetPrimitive from '@radix-ui/react-dialog' - -import { cn } from '@/lib/utils' -import { IconClose } from '@/components/ui/icons' - -const Sheet = SheetPrimitive.Root - -const SheetTrigger = SheetPrimitive.Trigger - -const SheetClose = SheetPrimitive.Close - -const SheetPortal = ({ - className, - children, - ...props -}: SheetPrimitive.DialogPortalProps) => ( - - {children} - -) -SheetPortal.displayName = SheetPrimitive.Portal.displayName - -const SheetOverlay = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - -)) -SheetOverlay.displayName = SheetPrimitive.Overlay.displayName - -const SheetContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - - - {children} - - - Close - - - -)) -SheetContent.displayName = SheetPrimitive.Content.displayName - -const SheetHeader = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
    -) -SheetHeader.displayName = 'SheetHeader' - -const SheetFooter = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
    -) -SheetFooter.displayName = 'SheetFooter' - -const SheetTitle = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -SheetTitle.displayName = SheetPrimitive.Title.displayName - -const SheetDescription = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -SheetDescription.displayName = SheetPrimitive.Description.displayName - -export { - Sheet, - SheetTrigger, - SheetClose, - SheetContent, - SheetHeader, - SheetFooter, - SheetTitle, - SheetDescription -} diff --git a/spaces/Gen-Sim/Gen-Sim/cliport/tasks/grippers.py b/spaces/Gen-Sim/Gen-Sim/cliport/tasks/grippers.py deleted file mode 100644 index f6f8d55292eb7f394a825ceee668898b58aae11f..0000000000000000000000000000000000000000 --- a/spaces/Gen-Sim/Gen-Sim/cliport/tasks/grippers.py +++ /dev/null @@ -1,249 +0,0 @@ -"""Classes to handle gripper dynamics.""" - -import os - -import numpy as np -from cliport.utils import pybullet_utils - -import pybullet as p - -SPATULA_BASE_URDF = 'ur5/spatula/spatula-base.urdf' -SUCTION_BASE_URDF = 'ur5/suction/suction-base.urdf' -SUCTION_HEAD_URDF = 'ur5/suction/suction-head.urdf' - - -class Gripper: - """Base gripper class.""" - - def __init__(self, assets_root): - self.assets_root = assets_root - self.activated = False - - def step(self): - """This function can be used to create gripper-specific behaviors.""" - return - - def activate(self, objects): - del objects - return - - def release(self): - return - - -class Spatula(Gripper): - """Simulate simple spatula for pushing.""" - - def __init__(self, assets_root=None, robot=None, ee=None, obj_ids=None): - """Creates spatula and 'attaches' it to the robot.""" - if assets_root is None: - return - super().__init__(assets_root) - - # Load spatula model. - pose = ((0.487, 0.109, 0.438), p.getQuaternionFromEuler((np.pi, 0, 0))) - self.base_urdf_path = os.path.join(self.assets_root, SPATULA_BASE_URDF) - - base = pybullet_utils.load_urdf( - p, self.base_urdf_path, pose[0], pose[1]) - self.base = base - p.createConstraint( - parentBodyUniqueId=robot, - parentLinkIndex=ee, - childBodyUniqueId=base, - childLinkIndex=-1, - jointType=p.JOINT_FIXED, - jointAxis=(0, 0, 0), - parentFramePosition=(0, 0, 0), - childFramePosition=(0, 0, 0.01)) - - -class Suction(Gripper): - """Simulate simple suction dynamics.""" - - def __init__(self, assets_root, robot, ee, obj_ids): - """Creates suction and 'attaches' it to the robot. - - Has special cases when dealing with rigid vs deformables. For rigid, - only need to check contact_constraint for any constraint. For soft - bodies (i.e., cloth or bags), use cloth_threshold to check distances - from gripper body (self.body) to any vertex in the cloth mesh. We - need correct code logic to handle gripping potentially a rigid or a - deformable (and similarly for releasing). - - To be clear on terminology: 'deformable' here should be interpreted - as a PyBullet 'softBody', which includes cloths and bags. There's - also cables, but those are formed by connecting rigid body beads, so - they can use standard 'rigid body' grasping code. - - To get the suction gripper pose, use p.getLinkState(self.body, 0), - and not p.getBasePositionAndOrientation(self.body) as the latter is - about z=0.03m higher and empirically seems worse. - - Args: - assets_root: str for root directory with assets. - robot: int representing PyBullet ID of robot. - ee: int representing PyBullet ID of end effector link. 
- obj_ids: list of PyBullet IDs of all suctionable objects in the env. - """ - super().__init__(assets_root) - - # Load suction gripper base model (visual only). - pose = ((0.487, 0.109, 0.438), p.getQuaternionFromEuler((np.pi, 0, 0))) - self.base_urdf_path = os.path.join(self.assets_root, SUCTION_BASE_URDF) - - base = pybullet_utils.load_urdf( - p, self.base_urdf_path, pose[0], pose[1]) - self.base = base - p.createConstraint( - parentBodyUniqueId=robot, - parentLinkIndex=ee, - childBodyUniqueId=base, - childLinkIndex=-1, - jointType=p.JOINT_FIXED, - jointAxis=(0, 0, 0), - parentFramePosition=(0, 0, 0), - childFramePosition=(0, 0, 0.01)) - - # Load suction tip model (visual and collision) with compliance. - # urdf = 'assets/ur5/suction/suction-head.urdf' - pose = ((0.487, 0.109, 0.347), p.getQuaternionFromEuler((np.pi, 0, 0))) - self.urdf_path = os.path.join(self.assets_root, SUCTION_HEAD_URDF) - self.body = pybullet_utils.load_urdf( - p, self.urdf_path, pose[0], pose[1]) - constraint_id = p.createConstraint( - parentBodyUniqueId=robot, - parentLinkIndex=ee, - childBodyUniqueId=self.body, - childLinkIndex=-1, - jointType=p.JOINT_FIXED, - jointAxis=(0, 0, 0), - parentFramePosition=(0, 0, 0), - childFramePosition=(0, 0, -0.08)) - p.changeConstraint(constraint_id, maxForce=100) - - # Reference to object IDs in environment for simulating suction. - self.obj_ids = obj_ids - - # Indicates whether gripper is gripping anything (rigid or def). - self.activated = False - - # For gripping and releasing rigid objects. - self.contact_constraint = None - - # Defaults for deformable parameters, and can override in tasks. - self.def_ignore = 0.035 # TODO(daniel) check if this is needed - self.def_threshold = 0.030 - self.def_nb_anchors = 1 - - # Track which deformable is being gripped (if any), and anchors. - self.def_grip_item = None - self.def_grip_anchors = [] - - # Determines release when gripped deformable touches a rigid/def. - # TODO(daniel) should check if the code uses this -- not sure? - self.def_min_vetex = None - self.def_min_distance = None - - # Determines release when a gripped rigid touches defs (e.g. cloth-cover). - self.init_grip_distance = None - self.init_grip_item = None - - def activate(self): - """Simulate suction using a rigid fixed constraint to contacted object.""" - # TODO(andyzeng): check deformables logic. - # del def_ids - - if not self.activated: - points = p.getContactPoints(bodyA=self.body, linkIndexA=0) - # print(points) - if points: - - # Handle contact between suction with a rigid object. - for point in points: - obj_id, contact_link = point[2], point[4] - if obj_id in self.obj_ids['rigid']: - body_pose = p.getLinkState(self.body, 0) - obj_pose = p.getBasePositionAndOrientation(obj_id) - world_to_body = p.invertTransform(body_pose[0], body_pose[1]) - obj_to_body = p.multiplyTransforms(world_to_body[0], - world_to_body[1], - obj_pose[0], obj_pose[1]) - self.contact_constraint = p.createConstraint( - parentBodyUniqueId=self.body, - parentLinkIndex=0, - childBodyUniqueId=obj_id, - childLinkIndex=contact_link, - jointType=p.JOINT_FIXED, - jointAxis=(0, 0, 0), - parentFramePosition=obj_to_body[0], - parentFrameOrientation=obj_to_body[1], - childFramePosition=(0, 0, 0), - childFrameOrientation=(0, 0, 0)) - - self.activated = True - - def release(self): - """Release gripper object, only applied if gripper is 'activated'. - - If suction off, detect contact between gripper and objects. - If suction on, detect contact between picked object and other objects. 
- - To handle deformables, simply remove constraints (i.e., anchors). - Also reset any relevant variables, e.g., if releasing a rigid, we - should reset init_grip values back to None, which will be re-assigned - in any subsequent grasps. - """ - if self.activated: - self.activated = False - - # Release gripped rigid object (if any). - if self.contact_constraint is not None: - try: - p.removeConstraint(self.contact_constraint) - self.contact_constraint = None - except: # pylint: disable=bare-except - pass - self.init_grip_distance = None - self.init_grip_item = None - - # Release gripped deformable object (if any). - if self.def_grip_anchors: - for anchor_id in self.def_grip_anchors: - p.removeConstraint(anchor_id) - self.def_grip_anchors = [] - self.def_grip_item = None - self.def_min_vetex = None - self.def_min_distance = None - - def detect_contact(self): - """Detects a contact with a rigid object.""" - body, link = self.body, 0 - if self.activated and self.contact_constraint is not None: - try: - info = p.getConstraintInfo(self.contact_constraint) - body, link = info[2], info[3] - except: # pylint: disable=bare-except - self.contact_constraint = None - pass - - # Get all contact points between the suction and a rigid body. - points = p.getContactPoints(bodyA=body, linkIndexA=link) - # print(points) - # exit() - if self.activated: - points = [point for point in points if point[2] != self.body] - - # # We know if len(points) > 0, contact is made with SOME rigid item. - if points: - return True - - return False - - def check_grasp(self): - """Check a grasp (object in contact?) for picking success.""" - - suctioned_object = None - if self.contact_constraint is not None: - suctioned_object = p.getConstraintInfo(self.contact_constraint)[2] - return suctioned_object is not None diff --git a/spaces/GiordanoB/sumarizacao-abstrativa-portugues/README.md b/spaces/GiordanoB/sumarizacao-abstrativa-portugues/README.md deleted file mode 100644 index b6c50d350bddb3fe1fc3ddb3438c4fdbf4d9e650..0000000000000000000000000000000000000000 --- a/spaces/GiordanoB/sumarizacao-abstrativa-portugues/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Sumarizacao Abstrativa Portugues -emoji: 🚀 -colorFrom: pink -colorTo: gray -sdk: gradio -sdk_version: 3.0.10 -app_file: app.py -pinned: false -license: afl-3.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/GolDNenex/Super-Resolution-Anime-Diffusion/Waifu2x/__init__.py b/spaces/GolDNenex/Super-Resolution-Anime-Diffusion/Waifu2x/__init__.py deleted file mode 100644 index 919c67429f059707b271b067f40783c04a42a5ac..0000000000000000000000000000000000000000 --- a/spaces/GolDNenex/Super-Resolution-Anime-Diffusion/Waifu2x/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# -*- coding: utf-8 -*- -# file: __init__.py -# time: 05/12/2022 -# author: yangheng -# github: https://github.com/yangheng95 -# huggingface: https://huggingface.co/yangheng -# google scholar: https://scholar.google.com/citations?user=NPq5a_0AAAAJ&hl=en -# Copyright (C) 2021. All Rights Reserved. 
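Editor's note on Suction.activate() in cliport/tasks/grippers.py above: for rigid objects, "suction" is simulated by expressing the contacted object's pose in the suction-tip frame and then welding the two bodies with a fixed constraint. A condensed restatement of that step, assuming the body ids come from an already-connected PyBullet simulation:

import pybullet as p

def attach_rigid(gripper_body, obj_id, contact_link):
    # World poses of the suction-tip link (link 0) and of the contacted object.
    body_pose = p.getLinkState(gripper_body, 0)
    obj_pose = p.getBasePositionAndOrientation(obj_id)
    # Re-express the object's pose in the suction-tip frame ...
    world_to_body = p.invertTransform(body_pose[0], body_pose[1])
    obj_to_body = p.multiplyTransforms(world_to_body[0], world_to_body[1],
                                       obj_pose[0], obj_pose[1])
    # ... and lock the two together with a fixed joint; releasing later is just
    # p.removeConstraint() on the returned constraint id.
    return p.createConstraint(
        parentBodyUniqueId=gripper_body, parentLinkIndex=0,
        childBodyUniqueId=obj_id, childLinkIndex=contact_link,
        jointType=p.JOINT_FIXED, jointAxis=(0, 0, 0),
        parentFramePosition=obj_to_body[0], parentFrameOrientation=obj_to_body[1],
        childFramePosition=(0, 0, 0), childFrameOrientation=(0, 0, 0))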
-from .magnify import ImageMagnifier diff --git a/spaces/Gradio-Blocks/StyleGAN-NADA/op/upfirdn2d_cpu.py b/spaces/Gradio-Blocks/StyleGAN-NADA/op/upfirdn2d_cpu.py deleted file mode 100644 index a0f820b4c81e03598589b1ea6b95cf9bef9b04f8..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/StyleGAN-NADA/op/upfirdn2d_cpu.py +++ /dev/null @@ -1,60 +0,0 @@ -import os - -import torch -from torch.autograd import Function -from torch.nn import functional as F - - - -module_path = os.path.dirname(__file__) - -def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): - out = upfirdn2d_native( - input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1] - ) - - return out - - -def upfirdn2d_native( - input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1 -): - _, channel, in_h, in_w = input.shape - input = input.reshape(-1, in_h, in_w, 1) - - _, in_h, in_w, minor = input.shape - kernel_h, kernel_w = kernel.shape - - out = input.view(-1, in_h, 1, in_w, 1, minor) - out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) - out = out.view(-1, in_h * up_y, in_w * up_x, minor) - - out = F.pad( - out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)] - ) - out = out[ - :, - max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0), - max(-pad_x0, 0) : out.shape[2] - max(-pad_x1, 0), - :, - ] - - out = out.permute(0, 3, 1, 2) - out = out.reshape( - [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1] - ) - w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) - out = F.conv2d(out, w) - out = out.reshape( - -1, - minor, - in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, - in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1, - ) - out = out.permute(0, 2, 3, 1) - out = out[:, ::down_y, ::down_x, :] - - out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h + down_y) // down_y - out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w + down_x) // down_x - - return out.view(-1, channel, out_h, out_w) diff --git a/spaces/Gradio-Blocks/protGPT2_gradioFold/alphafold/alphafold/model/features.py b/spaces/Gradio-Blocks/protGPT2_gradioFold/alphafold/alphafold/model/features.py deleted file mode 100644 index b31b277e02d66aa94013cef914ed035e7f041edc..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/protGPT2_gradioFold/alphafold/alphafold/model/features.py +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright 2021 DeepMind Technologies Limited -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
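Editor's note on upfirdn2d_native() in op/upfirdn2d_cpu.py above: after the upsample, pad, FIR-filter and downsample steps, the spatial output size is (in * up + pad0 + pad1 - kernel + down) // down, which is exactly what the function returns. A quick numeric check of that formula (all sizes illustrative):

# One spatial dimension of upfirdn2d's output, matching the out_h/out_w
# computation at the end of upfirdn2d_native().
in_h, up, down = 16, 2, 1
pad0, pad1 = 2, 1
kernel_h = 4

out_h = (in_h * up + pad0 + pad1 - kernel_h + down) // down
print(out_h)   # (32 + 3 - 4 + 1) // 1 = 32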
- -"""Code to generate processed features.""" -import copy -from typing import List, Mapping, Tuple -from alphafold.model.tf import input_pipeline -from alphafold.model.tf import proteins_dataset -import ml_collections -import numpy as np -import tensorflow.compat.v1 as tf - -FeatureDict = Mapping[str, np.ndarray] - - -def make_data_config( - config: ml_collections.ConfigDict, - num_res: int, - ) -> Tuple[ml_collections.ConfigDict, List[str]]: - """Makes a data config for the input pipeline.""" - cfg = copy.deepcopy(config.data) - - feature_names = cfg.common.unsupervised_features - if cfg.common.use_templates: - feature_names += cfg.common.template_features - - with cfg.unlocked(): - cfg.eval.crop_size = num_res - - return cfg, feature_names - - -def tf_example_to_features(tf_example: tf.train.Example, - config: ml_collections.ConfigDict, - random_seed: int = 0) -> FeatureDict: - """Converts tf_example to numpy feature dictionary.""" - num_res = int(tf_example.features.feature['seq_length'].int64_list.value[0]) - cfg, feature_names = make_data_config(config, num_res=num_res) - - if 'deletion_matrix_int' in set(tf_example.features.feature): - deletion_matrix_int = ( - tf_example.features.feature['deletion_matrix_int'].int64_list.value) - feat = tf.train.Feature(float_list=tf.train.FloatList( - value=map(float, deletion_matrix_int))) - tf_example.features.feature['deletion_matrix'].CopyFrom(feat) - del tf_example.features.feature['deletion_matrix_int'] - - tf_graph = tf.Graph() - with tf_graph.as_default(), tf.device('/device:CPU:0'): - tf.compat.v1.set_random_seed(random_seed) - tensor_dict = proteins_dataset.create_tensor_dict( - raw_data=tf_example.SerializeToString(), - features=feature_names) - processed_batch = input_pipeline.process_tensors_from_config( - tensor_dict, cfg) - - tf_graph.finalize() - - with tf.Session(graph=tf_graph) as sess: - features = sess.run(processed_batch) - - return {k: v for k, v in features.items() if v.dtype != 'O'} - - -def np_example_to_features(np_example: FeatureDict, - config: ml_collections.ConfigDict, - random_seed: int = 0) -> FeatureDict: - """Preprocesses NumPy feature dict using TF pipeline.""" - np_example = dict(np_example) - num_res = int(np_example['seq_length'][0]) - cfg, feature_names = make_data_config(config, num_res=num_res) - - if 'deletion_matrix_int' in np_example: - np_example['deletion_matrix'] = ( - np_example.pop('deletion_matrix_int').astype(np.float32)) - - tf_graph = tf.Graph() - with tf_graph.as_default(), tf.device('/device:CPU:0'): - tf.compat.v1.set_random_seed(random_seed) - tensor_dict = proteins_dataset.np_to_tensor_dict( - np_example=np_example, features=feature_names) - - processed_batch = input_pipeline.process_tensors_from_config( - tensor_dict, cfg) - - tf_graph.finalize() - - with tf.Session(graph=tf_graph) as sess: - features = sess.run(processed_batch) - - return {k: v for k, v in features.items() if v.dtype != 'O'} diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/guided_anchoring/ga_fast_r50_caffe_fpn_1x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/guided_anchoring/ga_fast_r50_caffe_fpn_1x_coco.py deleted file mode 100644 index e15bc29b03d8c612a8921873d456a03126f79aae..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/guided_anchoring/ga_fast_r50_caffe_fpn_1x_coco.py +++ /dev/null @@ -1,63 +0,0 @@ -_base_ = '../fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py' -model = dict( - pretrained='open-mmlab://detectron2/resnet50_caffe', - 
backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False), - norm_eval=True, - style='caffe'), - roi_head=dict( - bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))), - # model training and testing settings - train_cfg=dict( - rcnn=dict( - assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6), - sampler=dict(num=256))), - test_cfg=dict(rcnn=dict(score_thr=1e-3))) -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadProposals', num_max_proposals=300), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadProposals', num_max_proposals=None), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img', 'proposals']), - ]) -] -data = dict( - train=dict( - proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_train2017.pkl', - pipeline=train_pipeline), - val=dict( - proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_val2017.pkl', - pipeline=test_pipeline), - test=dict( - proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_val2017.pkl', - pipeline=test_pipeline)) -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r18-d8_769x769_80k_cityscapes.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r18-d8_769x769_80k_cityscapes.py deleted file mode 100644 index a990c076536ad9455a9203f5b6a60157f2f2f99f..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r18-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = './deeplabv3_r50-d8_769x769_80k_cityscapes.py' -model = dict( - pretrained='open-mmlab://resnet18_v1c', - backbone=dict(depth=18), - decode_head=dict( - in_channels=512, - channels=128, - ), - auxiliary_head=dict(in_channels=256, channels=64)) diff --git a/spaces/Grezz/generate_human_motion/pyrender/tests/unit/test_offscreen.py b/spaces/Grezz/generate_human_motion/pyrender/tests/unit/test_offscreen.py deleted file mode 100644 index 88983b0ff4e2ab6f5ef252c51f2ac669c3a0e0ca..0000000000000000000000000000000000000000 --- a/spaces/Grezz/generate_human_motion/pyrender/tests/unit/test_offscreen.py +++ /dev/null @@ -1,92 +0,0 @@ -import numpy as np -import trimesh - -from pyrender import (OffscreenRenderer, PerspectiveCamera, DirectionalLight, - SpotLight, Mesh, Node, Scene) - - -def test_offscreen_renderer(tmpdir): - - # Fuze trimesh - fuze_trimesh = trimesh.load('examples/models/fuze.obj') - fuze_mesh = Mesh.from_trimesh(fuze_trimesh) - - # Drill trimesh - drill_trimesh = trimesh.load('examples/models/drill.obj') - drill_mesh = 
Mesh.from_trimesh(drill_trimesh) - drill_pose = np.eye(4) - drill_pose[0,3] = 0.1 - drill_pose[2,3] = -np.min(drill_trimesh.vertices[:,2]) - - # Wood trimesh - wood_trimesh = trimesh.load('examples/models/wood.obj') - wood_mesh = Mesh.from_trimesh(wood_trimesh) - - # Water bottle trimesh - bottle_gltf = trimesh.load('examples/models/WaterBottle.glb') - bottle_trimesh = bottle_gltf.geometry[list(bottle_gltf.geometry.keys())[0]] - bottle_mesh = Mesh.from_trimesh(bottle_trimesh) - bottle_pose = np.array([ - [1.0, 0.0, 0.0, 0.1], - [0.0, 0.0, -1.0, -0.16], - [0.0, 1.0, 0.0, 0.13], - [0.0, 0.0, 0.0, 1.0], - ]) - - boxv_trimesh = trimesh.creation.box(extents=0.1 * np.ones(3)) - boxv_vertex_colors = np.random.uniform(size=(boxv_trimesh.vertices.shape)) - boxv_trimesh.visual.vertex_colors = boxv_vertex_colors - boxv_mesh = Mesh.from_trimesh(boxv_trimesh, smooth=False) - boxf_trimesh = trimesh.creation.box(extents=0.1 * np.ones(3)) - boxf_face_colors = np.random.uniform(size=boxf_trimesh.faces.shape) - boxf_trimesh.visual.face_colors = boxf_face_colors - # Instanced - poses = np.tile(np.eye(4), (2,1,1)) - poses[0,:3,3] = np.array([-0.1, -0.10, 0.05]) - poses[1,:3,3] = np.array([-0.15, -0.10, 0.05]) - boxf_mesh = Mesh.from_trimesh(boxf_trimesh, poses=poses, smooth=False) - - points = trimesh.creation.icosphere(radius=0.05).vertices - point_colors = np.random.uniform(size=points.shape) - points_mesh = Mesh.from_points(points, colors=point_colors) - - direc_l = DirectionalLight(color=np.ones(3), intensity=1.0) - spot_l = SpotLight(color=np.ones(3), intensity=10.0, - innerConeAngle=np.pi / 16, outerConeAngle=np.pi / 6) - - cam = PerspectiveCamera(yfov=(np.pi / 3.0)) - cam_pose = np.array([ - [0.0, -np.sqrt(2) / 2, np.sqrt(2) / 2, 0.5], - [1.0, 0.0, 0.0, 0.0], - [0.0, np.sqrt(2) / 2, np.sqrt(2) / 2, 0.4], - [0.0, 0.0, 0.0, 1.0] - ]) - - scene = Scene(ambient_light=np.array([0.02, 0.02, 0.02])) - - fuze_node = Node(mesh=fuze_mesh, translation=np.array([ - 0.1, 0.15, -np.min(fuze_trimesh.vertices[:,2]) - ])) - scene.add_node(fuze_node) - boxv_node = Node(mesh=boxv_mesh, translation=np.array([-0.1, 0.10, 0.05])) - scene.add_node(boxv_node) - boxf_node = Node(mesh=boxf_mesh) - scene.add_node(boxf_node) - - _ = scene.add(drill_mesh, pose=drill_pose) - _ = scene.add(bottle_mesh, pose=bottle_pose) - _ = scene.add(wood_mesh) - _ = scene.add(direc_l, pose=cam_pose) - _ = scene.add(spot_l, pose=cam_pose) - _ = scene.add(points_mesh) - - _ = scene.add(cam, pose=cam_pose) - - r = OffscreenRenderer(viewport_width=640, viewport_height=480) - color, depth = r.render(scene) - - assert color.shape == (480, 640, 3) - assert depth.shape == (480, 640) - assert np.max(depth.data) > 0.05 - assert np.count_nonzero(depth.data) > (0.2 * depth.size) - r.delete() diff --git a/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/device.py b/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/device.py deleted file mode 100644 index 23b8d9b6a968910b5f86e1a40bc68355a40f4cfb..0000000000000000000000000000000000000000 --- a/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/device.py +++ /dev/null @@ -1,5 +0,0 @@ -import torch - -cpu_device = torch.device("cpu") -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -is_cuda_available = torch.cuda.is_available() diff --git a/spaces/HESOAYM/ElviraMulti/chatgpt - windows.bat b/spaces/HESOAYM/ElviraMulti/chatgpt - windows.bat deleted file mode 100644 index 0b78fdc3a559abd692e3a9e9af5e482124d13a99..0000000000000000000000000000000000000000 --- 
a/spaces/HESOAYM/ElviraMulti/chatgpt - windows.bat +++ /dev/null @@ -1,14 +0,0 @@ -@echo off -echo Opening ChuanhuChatGPT... - -REM Open powershell via bat -start powershell.exe -NoExit -Command "python ./ChuanhuChatbot.py" - -REM The web page can be accessed with delayed start http://127.0.0.1:7860/ -ping -n 5 127.0.0.1>nul - -REM access chargpt via your default browser -start "" "http://127.0.0.1:7860/" - - -echo Finished opening ChuanhuChatGPT (http://127.0.0.1:7860/). \ No newline at end of file diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/modules/conv_tbc.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/modules/conv_tbc.py deleted file mode 100644 index 65e17ec94f7e595cb657b3d2daaa1052a95d0677..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/modules/conv_tbc.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import torch -from torch import nn -from torch.nn.modules.utils import _single -from torch import Tensor - - -class ConvTBC(torch.nn.Module): - """1D convolution over an input of shape (time x batch x channel) - - The implementation uses gemm to perform the convolution. This implementation - is faster than cuDNN for small kernel sizes. - """ - - def __init__(self, in_channels, out_channels, kernel_size, padding=0): - super(ConvTBC, self).__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.kernel_size = _single(kernel_size) - self.padding = _single(padding) - - self.weight = torch.nn.Parameter( - torch.Tensor(self.kernel_size[0], in_channels, out_channels) - ) - self.bias = torch.nn.Parameter(torch.Tensor(out_channels)) - - self.reset_parameters() - - def reset_parameters(self): - nn.init.xavier_normal_(self.weight) - nn.init.zeros_(self.bias) - - def conv_tbc(self, input: Tensor): - return torch.conv_tbc( - input.contiguous(), self.weight, self.bias, self.padding[0] - ) - - def forward(self, input: Tensor): - return self.conv_tbc(input) - - def __repr__(self): - s = ( - "{name}({in_channels}, {out_channels}, kernel_size={kernel_size}" - ", padding={padding}" - ) - if self.bias is None: - s += ", bias=False" - s += ")" - return s.format(name=self.__class__.__name__, **self.__dict__) diff --git a/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/utils/inference/tts.py b/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/utils/inference/tts.py deleted file mode 100644 index dc485ec44dbf34ddbb69c15ad524c0fab189c3c5..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/utils/inference/tts.py +++ /dev/null @@ -1,167 +0,0 @@ -from __future__ import absolute_import, division, print_function, unicode_literals -from typing import Tuple -import sys -from argparse import ArgumentParser - -import torch -import numpy as np -import os -import json -import torch - -sys.path.append(os.path.join(os.path.dirname(__file__), "../../src/glow_tts")) - -from scipy.io.wavfile import write -from hifi.env import AttrDict -from hifi.models import Generator - - -from text import text_to_sequence -import commons -import models -import utils - - -def check_directory(dir): - if not os.path.exists(dir): - sys.exit("Error: {} directory does not exist".format(dir)) - - -class TextToMel: - def __init__(self, glow_model_dir, device="cuda"): - self.glow_model_dir = glow_model_dir - 
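Editor's note on ConvTBC in fairseq/modules/conv_tbc.py above: the layer convolves a time-major (time x batch x channel) tensor through torch.conv_tbc, so the weight is laid out as (kernel_size, in_channels, out_channels) rather than the usual Conv1d layout. A minimal shape demo (sizes are arbitrary):

import torch

T, B, C_in, C_out, K = 10, 2, 8, 16, 3
x = torch.randn(T, B, C_in)                 # time-major input
weight = torch.randn(K, C_in, C_out)        # (kernel, in, out), as ConvTBC stores it
bias = torch.zeros(C_out)

y = torch.conv_tbc(x.contiguous(), weight, bias, 1)   # last argument is the padding
print(y.shape)                              # torch.Size([10, 2, 16]): T + 2*pad - K + 1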
check_directory(self.glow_model_dir) - self.device = device - self.hps, self.glow_tts_model = self.load_glow_tts() - - def load_glow_tts(self): - hps = utils.get_hparams_from_dir(self.glow_model_dir) - checkpoint_path = utils.latest_checkpoint_path(self.glow_model_dir) - symbols = list(hps.data.punc) + list(hps.data.chars) - glow_tts_model = models.FlowGenerator( - len(symbols) + getattr(hps.data, "add_blank", False), - out_channels=hps.data.n_mel_channels, - **hps.model - ) # .to(self.device) - - if self.device == "cuda": - glow_tts_model.to("cuda") - - utils.load_checkpoint(checkpoint_path, glow_tts_model) - glow_tts_model.decoder.store_inverse() - _ = glow_tts_model.eval() - - return hps, glow_tts_model - - def generate_mel(self, text, noise_scale=0.667, length_scale=1.0): - print(f"Noise scale: {noise_scale} and Length scale: {length_scale}") - symbols = list(self.hps.data.punc) + list(self.hps.data.chars) - cleaner = self.hps.data.text_cleaners - if getattr(self.hps.data, "add_blank", False): - text_norm = text_to_sequence(text, symbols, cleaner) - text_norm = commons.intersperse(text_norm, len(symbols)) - else: # If not using "add_blank" option during training, adding spaces at the beginning and the end of utterance improves quality - text = " " + text.strip() + " " - text_norm = text_to_sequence(text, symbols, cleaner) - - sequence = np.array(text_norm)[None, :] - - del symbols - del cleaner - del text - del text_norm - - if self.device == "cuda": - x_tst = torch.autograd.Variable(torch.from_numpy(sequence)).cuda().long() - x_tst_lengths = torch.tensor([x_tst.shape[1]]).cuda() - else: - x_tst = torch.autograd.Variable(torch.from_numpy(sequence)).long() - x_tst_lengths = torch.tensor([x_tst.shape[1]]) - - with torch.no_grad(): - (y_gen_tst, *_), *_, (attn_gen, *_) = self.glow_tts_model( - x_tst, - x_tst_lengths, - gen=True, - noise_scale=noise_scale, - length_scale=length_scale, - ) - del x_tst - del x_tst_lengths - torch.cuda.empty_cache() - return y_gen_tst.cpu().detach().numpy() - - -class MelToWav: - def __init__(self, hifi_model_dir, device="cuda"): - self.hifi_model_dir = hifi_model_dir - check_directory(self.hifi_model_dir) - self.device = device - self.h, self.hifi_gan_generator = self.load_hifi_gan() - - def load_hifi_gan(self): - checkpoint_path = utils.latest_checkpoint_path(self.hifi_model_dir, regex="g_*") - config_file = os.path.join(self.hifi_model_dir, "config.json") - data = open(config_file).read() - json_config = json.loads(data) - h = AttrDict(json_config) - torch.manual_seed(h.seed) - - generator = Generator(h).to(self.device) - - assert os.path.isfile(checkpoint_path) - print("Loading '{}'".format(checkpoint_path)) - state_dict_g = torch.load(checkpoint_path, map_location=self.device) - print("Complete.") - - generator.load_state_dict(state_dict_g["generator"]) - - generator.eval() - generator.remove_weight_norm() - - return h, generator - - def generate_wav(self, mel): - mel = torch.FloatTensor(mel).to(self.device) - - y_g_hat = self.hifi_gan_generator(mel) # passing through vocoder - audio = y_g_hat.squeeze() - audio = audio * 32768.0 - audio = audio.cpu().detach().numpy().astype("int16") - - del y_g_hat - del mel - torch.cuda.empty_cache() - return audio, self.h.sampling_rate - -def restricted_float(x): - try: - x = float(x) - except ValueError: - raise argparse.ArgumentTypeError("%r not a floating-point literal" % (x,)) - - if x < 0.0 or x > 1.0: - raise argparse.ArgumentTypeError("%r not in range [0.0, 1.0]"%(x,)) - return x - - -if __name__ == "__main__": 
- parser = ArgumentParser() - parser.add_argument("-a", "--acoustic", required=True, type=str) - parser.add_argument("-v", "--vocoder", required=True, type=str) - parser.add_argument("-d", "--device", type=str, default="cpu") - parser.add_argument("-t", "--text", type=str, required=True) - parser.add_argument("-w", "--wav", type=str, required=True) - parser.add_argument("-n", "--noise-scale", default=0.667, type=restricted_float ) - parser.add_argument("-l", "--length-scale", default=1.0, type=float) - - args = parser.parse_args() - - text_to_mel = TextToMel(glow_model_dir=args.acoustic, device=args.device) - mel_to_wav = MelToWav(hifi_model_dir=args.vocoder, device=args.device) - - mel = text_to_mel.generate_mel(args.text, args.noise_scale, args.length_scale) - audio, sr = mel_to_wav.generate_wav(mel) - - write(filename=args.wav, rate=sr, data=audio) - diff --git a/spaces/Hila/RobustViT/robustness_dataset_per_class.py b/spaces/Hila/RobustViT/robustness_dataset_per_class.py deleted file mode 100644 index ca4c09ac578799fea50d17350ad6f5cb11017424..0000000000000000000000000000000000000000 --- a/spaces/Hila/RobustViT/robustness_dataset_per_class.py +++ /dev/null @@ -1,65 +0,0 @@ -import json -from torchvision.datasets import ImageFolder -import torch -import os -from PIL import Image -import collections -import torchvision.transforms as transforms -from label_str_to_imagenet_classes import label_str_to_imagenet_classes - -torch.manual_seed(0) - -ImageItem = collections.namedtuple('ImageItem', ('image_name', 'tag')) - -normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], - std=[0.5, 0.5, 0.5]) - -transform = transforms.Compose([ - transforms.Resize(256), - transforms.CenterCrop(224), - transforms.ToTensor(), - normalize, -]) - -class RobustnessDataset(ImageFolder): - def __init__(self, imagenet_path, folder, imagenet_classes_path='imagenet_classes.json', isV2=False, isSI=False): - self._isV2 = isV2 - self._isSI = isSI - self._folder = folder - self._imagenet_path = imagenet_path - with open(imagenet_classes_path, 'r') as f: - self._imagenet_classes = json.load(f) - self._all_images = [] - - base_dir = os.path.join(self._imagenet_path, folder) - for i, file in enumerate(os.listdir(base_dir)): - self._all_images.append(ImageItem(file, folder)) - - - def __getitem__(self, item): - image_item = self._all_images[item] - image_path = os.path.join(self._imagenet_path, image_item.tag, image_item.image_name) - image = Image.open(image_path) - image = image.convert('RGB') - image = transform(image) - - if self._isV2: - class_name = int(image_item.tag) - elif self._isSI: - class_name = int(label_str_to_imagenet_classes[image_item.tag]) - else: - class_name = int(self._imagenet_classes[image_item.tag]) - - return image, class_name - - def __len__(self): - return len(self._all_images) - - def get_classname(self): - if self._isV2: - class_name = int(self._folder) - elif self._isSI: - class_name = int(label_str_to_imagenet_classes[self._folder]) - else: - class_name = int(self._imagenet_classes[self._folder]) - return class_name \ No newline at end of file diff --git a/spaces/HuggingFaceM4/OBELICS-Interactive-Map/README.md b/spaces/HuggingFaceM4/OBELICS-Interactive-Map/README.md deleted file mode 100644 index 25db1c780ca473c9fa2a032b2c5a456f4de734d5..0000000000000000000000000000000000000000 --- a/spaces/HuggingFaceM4/OBELICS-Interactive-Map/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: OBELICS Interactive Map -emoji: 👓 -colorFrom: yellow -colorTo: blue -sdk: static -pinned: false -datasets: -- 
HuggingFaceM4/OBELISC ---- diff --git a/spaces/IDEA-Research/Grounded-SAM/segment_anything/CODE_OF_CONDUCT.md b/spaces/IDEA-Research/Grounded-SAM/segment_anything/CODE_OF_CONDUCT.md deleted file mode 100644 index 08b500a221857ec3f451338e80b4a9ab1173a1af..0000000000000000000000000000000000000000 --- a/spaces/IDEA-Research/Grounded-SAM/segment_anything/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,80 +0,0 @@ -# Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to make participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, sex characteristics, gender identity and expression, -level of experience, education, socio-economic status, nationality, personal -appearance, race, religion, or sexual identity and orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or - advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies within all project spaces, and it also applies when -an individual is representing the project or its community in public spaces. -Examples of representing a project or community include using an official -project e-mail address, posting via an official social media account, or acting -as an appointed representative at an online or offline event. Representation of -a project may be further defined and clarified by project maintainers. - -This Code of Conduct also applies outside the project spaces when there is a -reasonable belief that an individual's behavior may have a negative impact on -the project or its community. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at . All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. 
- -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html - -[homepage]: https://www.contributor-covenant.org - -For answers to common questions about this code of conduct, see -https://www.contributor-covenant.org/faq diff --git a/spaces/Ibtehaj10/cheating-detection-FYP/yolovs5/models/__init__.py b/spaces/Ibtehaj10/cheating-detection-FYP/yolovs5/models/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Intel/Q8-Chat/utils.py b/spaces/Intel/Q8-Chat/utils.py deleted file mode 100644 index 274d36f46615d35a2b90821829c9b44d1a85e1c3..0000000000000000000000000000000000000000 --- a/spaces/Intel/Q8-Chat/utils.py +++ /dev/null @@ -1,50 +0,0 @@ -from enum import IntEnum - - -CONTROLLER_HEART_BEAT_EXPIRATION = 90 -WORKER_HEART_BEAT_INTERVAL = 30 -WORKER_API_TIMEOUT = 20 - -LOGDIR = "." - - -class ErrorCode(IntEnum): - """ - https://platform.openai.com/docs/guides/error-codes/api-errors - """ - - VALIDATION_TYPE_ERROR = 40001 - - INVALID_AUTH_KEY = 40101 - INCORRECT_AUTH_KEY = 40102 - NO_PERMISSION = 40103 - - INVALID_MODEL = 40301 - PARAM_OUT_OF_RANGE = 40302 - CONTEXT_OVERFLOW = 40303 - - RATE_LIMIT = 42901 - QUOTA_EXCEEDED = 42902 - ENGINE_OVERLOADED = 42903 - - INTERNAL_ERROR = 50001 - CUDA_OUT_OF_MEMORY = 50002 - GRADIO_REQUEST_ERROR = 50003 - GRADIO_STREAM_UNKNOWN_ERROR = 50004 - CONTROLLER_NO_WORKER = 50005 - CONTROLLER_WORKER_TIMEOUT = 50006 - - -get_window_url_params_js = """ -function() { - const params = new URLSearchParams(window.location.search); - url_params = Object.fromEntries(params); - console.log("url_params", url_params); - return url_params; - } -""" - - -server_error_msg = ( - "**NETWORK ERROR DUE TO HIGH TRAFFIC. 
PLEASE REGENERATE OR REFRESH THIS PAGE.**" -) diff --git a/spaces/Izal887/Konci887/vc_infer_pipeline.py b/spaces/Izal887/Konci887/vc_infer_pipeline.py deleted file mode 100644 index c26d45068f9b6bf2b194b13c3c89f8a06347c124..0000000000000000000000000000000000000000 --- a/spaces/Izal887/Konci887/vc_infer_pipeline.py +++ /dev/null @@ -1,306 +0,0 @@ -import numpy as np, parselmouth, torch, pdb -from time import time as ttime -import torch.nn.functional as F -from config import x_pad, x_query, x_center, x_max -import scipy.signal as signal -import pyworld, os, traceback, faiss -from scipy import signal - -bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000) - - -class VC(object): - def __init__(self, tgt_sr, device, is_half): - self.sr = 16000 # hubert输入采样率 - self.window = 160 # 每帧点数 - self.t_pad = self.sr * x_pad # 每条前后pad时间 - self.t_pad_tgt = tgt_sr * x_pad - self.t_pad2 = self.t_pad * 2 - self.t_query = self.sr * x_query # 查询切点前后查询时间 - self.t_center = self.sr * x_center # 查询切点位置 - self.t_max = self.sr * x_max # 免查询时长阈值 - self.device = device - self.is_half = is_half - - def get_f0(self, x, p_len, f0_up_key, f0_method, inp_f0=None): - time_step = self.window / self.sr * 1000 - f0_min = 50 - f0_max = 1100 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - if f0_method == "pm": - f0 = ( - parselmouth.Sound(x, self.sr) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=f0_min, - pitch_ceiling=f0_max, - ) - .selected_array["frequency"] - ) - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad( - f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant" - ) - elif f0_method == "harvest": - f0, t = pyworld.harvest( - x.astype(np.double), - fs=self.sr, - f0_ceil=f0_max, - f0_floor=f0_min, - frame_period=10, - ) - f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr) - f0 = signal.medfilt(f0, 3) - f0 *= pow(2, f0_up_key / 12) - # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - tf0 = self.sr // self.window # 每秒f0点数 - if inp_f0 is not None: - delta_t = np.round( - (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1 - ).astype("int16") - replace_f0 = np.interp( - list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1] - ) - shape = f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)].shape[0] - f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)] = replace_f0[:shape] - # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - f0bak = f0.copy() - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( - f0_mel_max - f0_mel_min - ) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - f0_coarse = np.rint(f0_mel).astype(np.int) - return f0_coarse, f0bak # 1-0 - - def vc( - self, - model, - net_g, - sid, - audio0, - pitch, - pitchf, - times, - index, - big_npy, - index_rate, - ): # ,file_index,file_big_npy - feats = torch.from_numpy(audio0) - if self.is_half: - feats = feats.half() - else: - feats = feats.float() - if feats.dim() == 2: # double channels - feats = feats.mean(-1) - assert feats.dim() == 1, feats.dim() - feats = feats.view(1, -1) - padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False) - - inputs = { - "source": feats.to(self.device), - "padding_mask": padding_mask, - "output_layer": 9, # layer 9 - } - t0 = ttime() - with torch.no_grad(): - logits = model.extract_features(**inputs) - feats = model.final_proj(logits[0]) - - if 
( - isinstance(index, type(None)) == False - and isinstance(big_npy, type(None)) == False - and index_rate != 0 - ): - npy = feats[0].cpu().numpy() - if self.is_half: - npy = npy.astype("float32") - _, I = index.search(npy, 1) - npy = big_npy[I.squeeze()] - if self.is_half: - npy = npy.astype("float16") - feats = ( - torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate - + (1 - index_rate) * feats - ) - - feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1) - t1 = ttime() - p_len = audio0.shape[0] // self.window - if feats.shape[1] < p_len: - p_len = feats.shape[1] - if pitch != None and pitchf != None: - pitch = pitch[:, :p_len] - pitchf = pitchf[:, :p_len] - p_len = torch.tensor([p_len], device=self.device).long() - with torch.no_grad(): - if pitch != None and pitchf != None: - audio1 = ( - (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0] * 32768) - .data.cpu() - .float() - .numpy() - .astype(np.int16) - ) - else: - audio1 = ( - (net_g.infer(feats, p_len, sid)[0][0, 0] * 32768) - .data.cpu() - .float() - .numpy() - .astype(np.int16) - ) - del feats, p_len, padding_mask - if torch.cuda.is_available(): - torch.cuda.empty_cache() - t2 = ttime() - times[0] += t1 - t0 - times[2] += t2 - t1 - return audio1 - - def pipeline( - self, - model, - net_g, - sid, - audio, - times, - f0_up_key, - f0_method, - file_index, - file_big_npy, - index_rate, - if_f0, - f0_file=None, - ): - if ( - file_big_npy != "" - and file_index != "" - and os.path.exists(file_big_npy) == True - and os.path.exists(file_index) == True - and index_rate != 0 - ): - try: - index = faiss.read_index(file_index) - big_npy = np.load(file_big_npy) - except: - traceback.print_exc() - index = big_npy = None - else: - index = big_npy = None - print("Feature retrieval library doesn't exist or ratio is 0") - audio = signal.filtfilt(bh, ah, audio) - audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect") - opt_ts = [] - if audio_pad.shape[0] > self.t_max: - audio_sum = np.zeros_like(audio) - for i in range(self.window): - audio_sum += audio_pad[i : i - self.window] - for t in range(self.t_center, audio.shape[0], self.t_center): - opt_ts.append( - t - - self.t_query - + np.where( - np.abs(audio_sum[t - self.t_query : t + self.t_query]) - == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min() - )[0][0] - ) - s = 0 - audio_opt = [] - t = None - t1 = ttime() - audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect") - p_len = audio_pad.shape[0] // self.window - inp_f0 = None - if hasattr(f0_file, "name") == True: - try: - with open(f0_file.name, "r") as f: - lines = f.read().strip("\n").split("\n") - inp_f0 = [] - for line in lines: - inp_f0.append([float(i) for i in line.split(",")]) - inp_f0 = np.array(inp_f0, dtype="float32") - except: - traceback.print_exc() - sid = torch.tensor(sid, device=self.device).unsqueeze(0).long() - pitch, pitchf = None, None - if if_f0 == 1: - pitch, pitchf = self.get_f0(audio_pad, p_len, f0_up_key, f0_method, inp_f0) - pitch = pitch[:p_len] - pitchf = pitchf[:p_len] - pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long() - pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float() - t2 = ttime() - times[1] += t2 - t1 - for t in opt_ts: - t = t // self.window * self.window - if if_f0 == 1: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[s : t + self.t_pad2 + self.window], - pitch[:, s // self.window : (t + self.t_pad2) // self.window], - pitchf[:, s // self.window : (t + 
self.t_pad2) // self.window], - times, - index, - big_npy, - index_rate, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - else: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[s : t + self.t_pad2 + self.window], - None, - None, - times, - index, - big_npy, - index_rate, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - s = t - if if_f0 == 1: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[t:], - pitch[:, t // self.window :] if t is not None else pitch, - pitchf[:, t // self.window :] if t is not None else pitchf, - times, - index, - big_npy, - index_rate, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - else: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[t:], - None, - None, - times, - index, - big_npy, - index_rate, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - audio_opt = np.concatenate(audio_opt) - del pitch, pitchf, sid - if torch.cuda.is_available(): - torch.cuda.empty_cache() - return audio_opt diff --git a/spaces/JUNGU/gpt4kids/app.py b/spaces/JUNGU/gpt4kids/app.py deleted file mode 100644 index 35b0d693e95ec030be0d72d70f24276510bf0856..0000000000000000000000000000000000000000 --- a/spaces/JUNGU/gpt4kids/app.py +++ /dev/null @@ -1,97 +0,0 @@ -from langchain.chat_models import ChatOpenAI -from langchain.document_loaders import PyPDFLoader -from langchain.embeddings.openai import OpenAIEmbeddings -from langchain.embeddings.cohere import CohereEmbeddings -from langchain.text_splitter import CharacterTextSplitter -from langchain.vectorstores.elastic_vector_search import ElasticVectorSearch -from langchain.vectorstores import Chroma -from PyPDF2 import PdfWriter -import gradio as gr -import os -from dotenv import load_dotenv -import openai - -load_dotenv() - -os.environ["OPENAI_API_KEY"] = os.environ['my_secret'] -llm = ChatOpenAI(model_name="gpt-3.5-turbo-0613", temperature=0) -loader = PyPDFLoader("/home/user/app/docs.pdf") -documents = loader.load() - -text_splitter = CharacterTextSplitter(chunk_size=800, chunk_overlap=0) -texts = text_splitter.split_documents(documents) - -#vector embedding -embeddings = OpenAIEmbeddings() -vector_store = Chroma.from_documents(texts, embeddings) -retriever = vector_store.as_retriever(search_kwargs={"k": 3}) - - -from langchain.prompts.chat import ( - ChatPromptTemplate, - SystemMessagePromptTemplate, - HumanMessagePromptTemplate, -) - -system_template="""Your name is conanssam as a homeroom teacher for elementary school. You act like a nice teacher. -Never reveal that it is an AI. Generate answers for children. -If you don't know the answer, just say that "I don't know", don't try to make up an answer. ----------------- -{summaries} - -You MUST answer in Korean and in Markdown format:""" - -messages = [ - SystemMessagePromptTemplate.from_template(system_template), - HumanMessagePromptTemplate.from_template("{question}") -] - -prompt = ChatPromptTemplate.from_messages(messages) - -from langchain.chat_models import ChatOpenAI -from langchain.chains import RetrievalQAWithSourcesChain - -chain_type_kwargs = {"prompt": prompt} - -chain = RetrievalQAWithSourcesChain.from_chain_type( - llm=llm, - chain_type="stuff", - retriever=retriever, - return_source_documents=True, - chain_type_kwargs=chain_type_kwargs, - reduce_k_below_max_tokens=True, - verbose=True, -) - -query = "행복한 인생이란?" 
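# Annotation (added commentary, not part of the original app.py): the Korean
# query string above, "행복한 인생이란?", roughly means "What is a happy life?".
# It is issued once at startup as a smoke test of the retrieval chain; the loop
# that follows prints each retrieved source snippet ("내용" = content), its
# source file ("파일" = file), and page number ("페이지" = page).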
-result = chain(query) - - -for doc in result['source_documents']: - print('내용 : ' + doc.page_content[0:100].replace('\n', ' ')) - print('파일 : ' + doc.metadata['source']) - print('페이지 : ' + str(doc.metadata['page'])) - - -def respond(message, chat_history): # 채팅봇의 응답을 처리하는 함수를 정의합니다. - - result = chain(message) - - bot_message = result['answer'] - - # for i, doc in enumerate(result['source_documents']): - # bot_message += '[' + str(i+1) + '] ' + doc.metadata['source'] + '(' + str(doc.metadata['page']) + ') ' - - chat_history.append((message, bot_message)) # 채팅 기록에 사용자의 메시지와 봇의 응답을 추가합니다. - - return "", chat_history # 수정된 채팅 기록을 반환합니다. - -with gr.Blocks(theme='gstaff/sketch') as demo: # gr.Blocks()를 사용하여 인터페이스를 생성합니다. - gr.Markdown("# 안녕하세요.초등학생을 위한 챗GPT입니다. \n 답변 생성에 조금 시간이 소요될 수 있습니다.") - chatbot = gr.Chatbot(label="채팅창") # '채팅창'이라는 레이블을 가진 채팅봇 컴포넌트를 생성합니다. - msg = gr.Textbox(label="입력") # '입력'이라는 레이블을 가진 텍스트박스를 생성합니다. - clear = gr.Button("초기화") # '초기화'라는 레이블을 가진 버튼을 생성합니다. - - msg.submit(respond, [msg, chatbot], [msg, chatbot]) # 텍스트박스에 메시지를 입력하고 제출하면 respond 함수가 호출되도록 합니다. - clear.click(lambda: None, None, chatbot, queue=False) # '초기화' 버튼을 클릭하면 채팅 기록을 초기화합니다. -demo.launch(debug=True) # 인터페이스를 실행합니다. 실행하면 사용자는 '입력' 텍스트박스에 메시지를 작성하고 제출할 수 있으며, '초기화' 버튼을 통해 채팅 기록을 초기화 할 수 있습니다. diff --git a/spaces/Jaehan/Text-Generation-5/README.md b/spaces/Jaehan/Text-Generation-5/README.md deleted file mode 100644 index 5f3b5267d422839e2ff01068a025b90c6d1179e0..0000000000000000000000000000000000000000 --- a/spaces/Jaehan/Text-Generation-5/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Text Generation 5 -emoji: 🏆 -colorFrom: purple -colorTo: green -sdk: gradio -sdk_version: 3.35.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Jamkonams/AutoGPT/tests/local_cache_test.py b/spaces/Jamkonams/AutoGPT/tests/local_cache_test.py deleted file mode 100644 index bb10862656bb500f319ac231ff5bd5438d6fe7e2..0000000000000000000000000000000000000000 --- a/spaces/Jamkonams/AutoGPT/tests/local_cache_test.py +++ /dev/null @@ -1,67 +0,0 @@ -# sourcery skip: snake-case-functions -"""Tests for LocalCache class""" -import os -import sys -import unittest - -import pytest - -from autogpt.memory.local import LocalCache - - -def mock_config() -> dict: - """Mock the Config class""" - return type( - "MockConfig", - (object,), - { - "debug_mode": False, - "continuous_mode": False, - "speak_mode": False, - "memory_index": "auto-gpt", - }, - ) - - -@pytest.mark.integration_test -class TestLocalCache(unittest.TestCase): - """Tests for LocalCache class""" - - def setUp(self) -> None: - """Set up the test environment""" - self.cfg = mock_config() - self.cache = LocalCache(self.cfg) - - def test_add(self) -> None: - """Test adding a text to the cache""" - text = "Sample text" - self.cache.add(text) - self.assertIn(text, self.cache.data.texts) - - def test_clear(self) -> None: - """Test clearing the cache""" - self.cache.clear() - self.assertEqual(self.cache.data.texts, []) - - def test_get(self) -> None: - """Test getting a text from the cache""" - text = "Sample text" - self.cache.add(text) - result = self.cache.get(text) - self.assertEqual(result, [text]) - - def test_get_relevant(self) -> None: - """Test getting relevant texts from the cache""" - text1 = "Sample text 1" - text2 = "Sample text 2" - self.cache.add(text1) - self.cache.add(text2) - result = self.cache.get_relevant(text1, 1) - self.assertEqual(result, [text1]) 
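# Annotation (added commentary, not part of the original test file): the class is
# marked @pytest.mark.integration_test, so these tests are normally selected with
# something like `pytest -m integration_test tests/local_cache_test.py`. The
# get_relevant(query, k) call is assumed to rank the stored texts by embedding
# similarity to the query and return the top k, which is why adding both sample
# texts and then querying with text1 is expected to return [text1].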
- - def test_get_stats(self) -> None: - """Test getting the cache stats""" - text = "Sample text" - self.cache.add(text) - stats = self.cache.get_stats() - self.assertEqual(stats, (4, self.cache.data.embeddings.shape)) diff --git a/spaces/Jeff2323/ai-comic-factory/src/components/icons/full-screen.tsx b/spaces/Jeff2323/ai-comic-factory/src/components/icons/full-screen.tsx deleted file mode 100644 index 34ec93bbab4b8359868737dbab9c6f7f6d594e03..0000000000000000000000000000000000000000 --- a/spaces/Jeff2323/ai-comic-factory/src/components/icons/full-screen.tsx +++ /dev/null @@ -1,16 +0,0 @@ -export function FullScreenIcon() { - return ( - - - - - - - - - - - - - ) -} \ No newline at end of file diff --git a/spaces/JeffJing/ZookChatBot/steamship/data/tags/__init__.py b/spaces/JeffJing/ZookChatBot/steamship/data/tags/__init__.py deleted file mode 100644 index d9766ef9f9dab6ee7106bc40148b35ec7e556ce7..0000000000000000000000000000000000000000 --- a/spaces/JeffJing/ZookChatBot/steamship/data/tags/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -from .tag import Tag -from .tag_constants import DocTag, GenerationTag, TagKind, TagValueKey, TokenTag - -__all__ = [ - "DocTag", - "Tag", - "TagKind", - "TokenTag", - "TagValueKey", - "GenerationTag", -] diff --git a/spaces/JohnSmith9982/VITS-Umamusume-voice-synthesizer/text/ngu_dialect.py b/spaces/JohnSmith9982/VITS-Umamusume-voice-synthesizer/text/ngu_dialect.py deleted file mode 100644 index ce3e12bbf0469426872eed5f681985d3e1be9b26..0000000000000000000000000000000000000000 --- a/spaces/JohnSmith9982/VITS-Umamusume-voice-synthesizer/text/ngu_dialect.py +++ /dev/null @@ -1,30 +0,0 @@ -import re -import opencc - - -dialects = {'SZ': 'suzhou', 'WX': 'wuxi', 'CZ': 'changzhou', 'HZ': 'hangzhou', - 'SX': 'shaoxing', 'NB': 'ningbo', 'JJ': 'jingjiang', 'YX': 'yixing', - 'JD': 'jiading', 'ZR': 'zhenru', 'PH': 'pinghu', 'TX': 'tongxiang', - 'JS': 'jiashan', 'HN': 'xiashi', 'LP': 'linping', 'XS': 'xiaoshan', - 'FY': 'fuyang', 'RA': 'ruao', 'CX': 'cixi', 'SM': 'sanmen', - 'TT': 'tiantai', 'WZ': 'wenzhou', 'SC': 'suichang', 'YB': 'youbu'} - -converters = {} - -for dialect in dialects.values(): - try: - converters[dialect] = opencc.OpenCC(dialect) - except: - pass - - -def ngu_dialect_to_ipa(text, dialect): - dialect = dialects[dialect] - text = converters[dialect].convert(text).replace('-','').replace('$',' ') - text = re.sub(r'[、;:]', ',', text) - text = re.sub(r'\s*,\s*', ', ', text) - text = re.sub(r'\s*。\s*', '. ', text) - text = re.sub(r'\s*?\s*', '? ', text) - text = re.sub(r'\s*!\s*', '! 
', text) - text = re.sub(r'\s*$', '', text) - return text diff --git a/spaces/Junity/TokaiTeio-SVC/vdecoder/hifigan/models.py b/spaces/Junity/TokaiTeio-SVC/vdecoder/hifigan/models.py deleted file mode 100644 index 9747301f350bb269e62601017fe4633ce271b27e..0000000000000000000000000000000000000000 --- a/spaces/Junity/TokaiTeio-SVC/vdecoder/hifigan/models.py +++ /dev/null @@ -1,503 +0,0 @@ -import os -import json -from .env import AttrDict -import numpy as np -import torch -import torch.nn.functional as F -import torch.nn as nn -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from .utils import init_weights, get_padding - -LRELU_SLOPE = 0.1 - - -def load_model(model_path, device='cuda'): - config_file = os.path.join(os.path.split(model_path)[0], 'config.json') - with open(config_file) as f: - data = f.read() - - global h - json_config = json.loads(data) - h = AttrDict(json_config) - - generator = Generator(h).to(device) - - cp_dict = torch.load(model_path) - generator.load_state_dict(cp_dict['generator']) - generator.eval() - generator.remove_weight_norm() - del cp_dict - return generator, h - - -class ResBlock1(torch.nn.Module): - def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.h = h - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - xt = c2(xt) - x = xt + x - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.h = h - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - xt = c(xt) - x = xt + x - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -def padDiff(x): - return F.pad(F.pad(x, (0,0,-1,1), 'constant', 0) - x, (0,0,0,-1), 'constant', 0) - -class SineGen(torch.nn.Module): - """ Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling 
rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__(self, samp_rate, harmonic_num=0, - sine_amp=0.1, noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - self.flag_for_pulse = flag_for_pulse - - def _f02uv(self, f0): - # generate uv signal - uv = (f0 > self.voiced_threshold).type(torch.float32) - return uv - - def _f02sine(self, f0_values): - """ f0_values: (batchsize, length, dim) - where dim indicates fundamental tone and overtones - """ - # convert to F0 in rad. The interger part n can be ignored - # because 2 * np.pi * n doesn't affect phase - rad_values = (f0_values / self.sampling_rate) % 1 - - # initial phase noise (no noise for fundamental component) - rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2], \ - device=f0_values.device) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - - # instantanouse phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad) - if not self.flag_for_pulse: - # for normal case - - # To prevent torch.cumsum numerical overflow, - # it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1. - # Buffer tmp_over_one_idx indicates the time step to add -1. - # This will not change F0 of sine because (x-1) * 2*pi = x * 2*pi - tmp_over_one = torch.cumsum(rad_values, 1) % 1 - tmp_over_one_idx = (padDiff(tmp_over_one)) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - - sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) - * 2 * np.pi) - else: - # If necessary, make sure that the first time step of every - # voiced segments is sin(pi) or cos(0) - # This is used for pulse-train generation - - # identify the last time step in unvoiced segments - uv = self._f02uv(f0_values) - uv_1 = torch.roll(uv, shifts=-1, dims=1) - uv_1[:, -1, :] = 1 - u_loc = (uv < 1) * (uv_1 > 0) - - # get the instantanouse phase - tmp_cumsum = torch.cumsum(rad_values, dim=1) - # different batch needs to be processed differently - for idx in range(f0_values.shape[0]): - temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :] - temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :] - # stores the accumulation of i.phase within - # each voiced segments - tmp_cumsum[idx, :, :] = 0 - tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum - - # rad_values - tmp_cumsum: remove the accumulation of i.phase - # within the previous voiced segment. 
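# Added explanatory note (not in the original source): at this point tmp_cumsum
# holds, at each voiced-segment boundary, the phase accumulated over the previous
# voiced segment. Subtracting it inside the cumulative sum below effectively
# restarts the instantaneous phase at the start of every voiced segment, which is
# what gives the pulse-train property stated in the class docstring: with
# flag_for_pulse=True the first time step of a voiced segment is sin(np.pi) or cos(0).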
- i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1) - - # get the sines - sines = torch.cos(i_phase * 2 * np.pi) - return sines - - def forward(self, f0): - """ sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, - device=f0.device) - # fundamental component - fn = torch.multiply(f0, torch.FloatTensor([[range(1, self.harmonic_num + 2)]]).to(f0.device)) - - # generate sine waveforms - sine_waves = self._f02sine(fn) * self.sine_amp - - # generate uv signal - # uv = torch.ones(f0.shape) - # uv = uv * (f0 > self.voiced_threshold) - uv = self._f02uv(f0) - - # noise: for unvoiced should be similar to sine_amp - # std = self.sine_amp/3 -> max value ~ self.sine_amp - # . for voiced regions is self.noise_std - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - - # first: set the unvoiced part to 0 by uv - # then: additive noise - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """ SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - - # to produce sine waveforms - self.l_sin_gen = SineGen(sampling_rate, harmonic_num, - sine_amp, add_noise_std, voiced_threshod) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x): - """ - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - """ - # source for harmonic branch - sine_wavs, uv, _ = self.l_sin_gen(x) - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - - # source for noise branch, in the same shape as uv - noise = torch.randn_like(uv) * self.sine_amp / 3 - return sine_merge, noise, uv - - -class Generator(torch.nn.Module): - def __init__(self, h): - super(Generator, self).__init__() - self.h = h - - self.num_kernels = len(h["resblock_kernel_sizes"]) - self.num_upsamples = len(h["upsample_rates"]) - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(h["upsample_rates"])) - self.m_source = SourceModuleHnNSF( - sampling_rate=h["sampling_rate"], - harmonic_num=8) - self.noise_convs = nn.ModuleList() - self.conv_pre = weight_norm(Conv1d(h["inter_channels"], h["upsample_initial_channel"], 7, 1, padding=3)) - resblock = ResBlock1 if h["resblock"] == '1' else ResBlock2 - self.ups = nn.ModuleList() - for i, (u, k) in 
enumerate(zip(h["upsample_rates"], h["upsample_kernel_sizes"])): - c_cur = h["upsample_initial_channel"] // (2 ** (i + 1)) - self.ups.append(weight_norm( - ConvTranspose1d(h["upsample_initial_channel"] // (2 ** i), h["upsample_initial_channel"] // (2 ** (i + 1)), - k, u, padding=(k - u) // 2))) - if i + 1 < len(h["upsample_rates"]): # - stride_f0 = np.prod(h["upsample_rates"][i + 1:]) - self.noise_convs.append(Conv1d( - 1, c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=stride_f0 // 2)) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = h["upsample_initial_channel"] // (2 ** (i + 1)) - for j, (k, d) in enumerate(zip(h["resblock_kernel_sizes"], h["resblock_dilation_sizes"])): - self.resblocks.append(resblock(h, ch, k, d)) - - self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3)) - self.ups.apply(init_weights) - self.conv_post.apply(init_weights) - self.cond = nn.Conv1d(h['gin_channels'], h['upsample_initial_channel'], 1) - - def forward(self, x, f0, g=None): - # print(1,x.shape,f0.shape,f0[:, None].shape) - f0 = self.f0_upsamp(f0[:, None]).transpose(1, 2) # bs,n,t - # print(2,f0.shape) - har_source, noi_source, uv = self.m_source(f0) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - x = x + self.cond(g) - # print(124,x.shape,har_source.shape) - for i in range(self.num_upsamples): - x = F.leaky_relu(x, LRELU_SLOPE) - # print(3,x.shape) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - # print(4,x_source.shape,har_source.shape,x.shape) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - remove_weight_norm(self.conv_pre) - remove_weight_norm(self.conv_post) - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, periods=None): - super(MultiPeriodDiscriminator, self).__init__() - self.periods = periods if periods is 
not None else [2, 3, 5, 7, 11] - self.discriminators = nn.ModuleList() - for period in self.periods: - self.discriminators.append(DiscriminatorP(period)) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - fmap_rs.append(fmap_r) - y_d_gs.append(y_d_g) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 128, 15, 1, padding=7)), - norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)), - norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)), - norm_f(Conv1d(256, 512, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiScaleDiscriminator(torch.nn.Module): - def __init__(self): - super(MultiScaleDiscriminator, self).__init__() - self.discriminators = nn.ModuleList([ - DiscriminatorS(use_spectral_norm=True), - DiscriminatorS(), - DiscriminatorS(), - ]) - self.meanpools = nn.ModuleList([ - AvgPool1d(4, 2, padding=2), - AvgPool1d(4, 2, padding=2) - ]) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - if i != 0: - y = self.meanpools[i - 1](y) - y_hat = self.meanpools[i - 1](y_hat) - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - fmap_rs.append(fmap_r) - y_d_gs.append(y_d_g) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -def feature_loss(fmap_r, fmap_g): - loss = 0 - for dr, dg in zip(fmap_r, fmap_g): - for rl, gl in zip(dr, dg): - loss += torch.mean(torch.abs(rl - gl)) - - return loss * 2 - - -def discriminator_loss(disc_real_outputs, disc_generated_outputs): - loss = 0 - r_losses = [] - g_losses = [] - for dr, dg in zip(disc_real_outputs, disc_generated_outputs): - r_loss = torch.mean((1 - dr) ** 2) - g_loss = torch.mean(dg ** 2) - loss += (r_loss + g_loss) - r_losses.append(r_loss.item()) - g_losses.append(g_loss.item()) - - return loss, r_losses, g_losses - - -def generator_loss(disc_outputs): - loss = 0 - gen_losses = [] - for dg in disc_outputs: - l = torch.mean((1 - dg) ** 2) - gen_losses.append(l) - loss += l - - return loss, gen_losses diff --git a/spaces/KAIST-Geometric-AI-Lab/salad-demo/salad/spaghetti/ui/ui_controllers.py b/spaces/KAIST-Geometric-AI-Lab/salad-demo/salad/spaghetti/ui/ui_controllers.py deleted file mode 100644 index 362e93fdacd0551293552d57ad9a959f519ca42c..0000000000000000000000000000000000000000 --- a/spaces/KAIST-Geometric-AI-Lab/salad-demo/salad/spaghetti/ui/ui_controllers.py +++ /dev/null @@ -1,758 +0,0 @@ -from __future__ import annotations -import constants -import vtk -import vtk.util.numpy_support as numpy_support -from custom_types import * -from utils import files_utils, rotation_utils -from models import gm_utils -from ui import ui_utils, inference_processing, 
gaussian_status -import options - - -def filter_by_inclusion(gaussian: gaussian_status.GaussianStatus) -> bool: - return gaussian.included - - -def filter_by_selection(gaussian: gaussian_status.GaussianStatus) -> bool: - return gaussian.is_selected - - -class GmmMeshStage: - - def turn_off_selected(self): - if self.selected is not None: - # self.arrows.turn_off() - self.toggle_selection(self.selected) - self.selected = None - - def turn_gmm_off(self): - self.turn_off_selected() - for gaussian in self.gmm: - gaussian.turn_off() - - def turn_gmm_on(self): - for gaussian in self.gmm: - gaussian.turn_on() - - def event_manger(self, object_id: str): - if object_id in self.addresses_dict: - return self.toggle_selection(object_id) - elif self.arrows.check_event(object_id): - transform = self.arrows.get_transform(object_id) - self.update_gmm(*transform) - return True - return False - - def toggle_selection(self, object_id: str): - self.gmm[self.addresses_dict[object_id]].toggle_selection() - if self.selected is None: - self.selected = object_id - elif self.selected == object_id and self.gmm[self.addresses_dict[object_id]].is_not_selected: - self.selected = None - else: - self.gmm[self.addresses_dict[self.selected]].toggle_selection() - self.selected = object_id - # if self.selected is not None: - # self.arrows.update_arrows_transform(self.gmm[self.addresses_dict[self.selected]]) - # else: - # self.arrows.turn_off() - return True - - def toggle_inclusion_by_id(self, g_id: int, select: Optional[bool] = None) -> Tuple[bool, List[gaussian_status.GaussianStatus]]: - toggled = [] - self.gmm[g_id].toggle_inclusion(select) - toggled.append(self.gmm[g_id]) - if self.symmetric_mode: - if self.gmm[g_id].twin is not None and self.gmm[g_id].twin.included != self.gmm[g_id].included: - self.gmm[g_id].twin.toggle_inclusion(select) - toggled.append(self.gmm[g_id].twin) - return True, toggled - - def toggle_inclusion(self, object_id: str) -> Tuple[bool, List[gaussian_status.GaussianStatus]]: - if object_id in self.addresses_dict: - return self.toggle_inclusion_by_id(self.addresses_dict[object_id]) - return False, [] - - def toggle_all(self): - for gaussian in self.gmm: - gaussian.toggle_inclusion() - - def __len__(self): - return len(self.gmm) - - def set_opacity(self, opacity: float): - self.view_style.opacity = opacity - for gaussian in self.gmm: - gaussian.set_color() - - def update_gmm(self, button: ui_utils.Buttons, key: str) -> bool: - if self.selected is not None: - g_id = self.addresses_dict[self.selected] - self.gmm[g_id].apply_affine(button, key) - if self.symmetric_mode: - if self.gmm[g_id].twin is not None: - self.gmm[g_id].twin.make_symmetric(False) - # self.arrows.update_arrows_transform(self.gmm[self.addresses_dict[self.selected]]) - return True - return False - - def get_gmm(self) -> Tuple[TS, T]: - raw_gmm = [g.get_raw_data() for g in self.gmm if g.included] - phi = torch.tensor([g[0] for g in raw_gmm], dtype=torch.float32).view(1, 1, -1) - # phi = torch.from_numpy(self.raw_gmm[0]).view(1, 1, -1).float() - mu = torch.stack([torch.from_numpy(g[1]).float() for g in raw_gmm], dim=0).view(1, 1, -1, 3) - p = torch.stack([torch.from_numpy(g[3]).float() for g in raw_gmm], dim=0).view(1, 1, -1, 3, 3) - eigen = torch.stack([torch.from_numpy(g[2]).float() for g in raw_gmm], dim=0).view(1, 1, -1, 3) - gmm = mu, p, phi, eigen - included = torch.tensor([g.gaussian_id for g in self.gmm if g.included], dtype=torch.int64) - return gmm, included - - def reset(self): - for g in self.gmm: - g.reset() - # 
self.turn_off_selected() - - def remove_all(self): - self.remove_gaussians(list(self.addresses_dict.keys())) - self.addresses_dict = {} - self.gmm = [] - - # def switch_arrows(self, arrow_type: ui_utils.Buttons): - # if self.arrows.switch_arrows(arrow_type) and self.selected is not None: - # self.arrows.update_arrows_transform(self.gmm[self.addresses_dict[self.selected]]) - - def toggle_symmetric(self, force_include: bool): - self.symmetric_mode = not self.symmetric_mode and False - # visited = set() - if self.symmetric_mode: - for i in range(len(self)): - self.gmm[i].make_symmetric(force_include) - - def remove_gaussians(self, addresses: List[str]): - for address in addresses: - gaussian_id: int = self.addresses_dict[address] - gaussian = self.gmm[gaussian_id] - # if gaussian.is_selected: - # self.toggle_selection(address) - self.gmm[gaussian_id] = None - gaussian.delete(self.render) - del self.addresses_dict[address] - self.gmm = [gaussian for gaussian in self.gmm if gaussian is not None] - self.addresses_dict = {self.gmm[i].get_address(): i for i in range(len(self.gmm))} - - def add_gaussians(self, gaussians: List[gaussian_status.GaussianStatus]) -> List[str]: - new_addresses = [] - for i, gaussian in enumerate(gaussians): - gaussian_copy = gaussian.copy(self.render, self.view_style, is_selected=False) - self.gmm.append(gaussian_copy) - new_addresses.append(gaussian_copy.get_address()) - self.addresses_dict = {self.gmm[i].get_address(): i for i in range(len(self.gmm))} - return new_addresses - - def make_twins(self, address_a: str, address_b: str): - if address_a in self.addresses_dict and address_b in self.addresses_dict: - gaussian_a, gaussian_b = self.gmm[self.addresses_dict[address_a]], self.gmm[self.addresses_dict[address_b]] - gaussian_a.twin = gaussian_b - gaussian_b.twin = gaussian_a - - def split_mesh_by_gmm(self, mesh) -> Dict[int, T]: - faces_split = {} - mu, p, phi, _ = self.get_gmm()[0] - eigen = torch.stack([torch.from_numpy(g.get_view_eigen()).float() for g in self.gmm if g.included], dim=0).view(1, 1, -1, 3) - gmm = mu, p, phi, eigen - faces_split_ = gm_utils.split_mesh_by_gmm(mesh, gmm) - counter = 0 - for i in range(len(self.gmm)): - if self.gmm[i].disabled: - faces_split[i] = None - else: - faces_split[i] = faces_split_[counter] - counter += 1 - return faces_split - - @staticmethod - def get_part_face(mesh: V_Mesh, faces_inds: T) -> Tuple[T_Mesh, T]: - mesh = mesh[0], torch.from_numpy(mesh[1]).long() - mask = faces_inds.ne(0) - faces = mesh[1][mask] - vs_inds = faces.flatten().unique() - vs = mesh[0][vs_inds] - mapper = torch.zeros(mesh[0].shape[0], dtype=torch.int64) - mapper[vs_inds] = torch.arange(vs.shape[0]) - return (vs, mapper[faces]), faces_inds[mask] - - def save(self, root: str, filter_faces: Callable[[gaussian_status.GaussianStatus], bool] = filter_by_inclusion): - if self.faces is not None: - if self.gmm_id == -1: - name = "mix" - else: - name = str(self.gmm_id) - path = f"{root}/{files_utils.get_time_name(name)}" - faces = list(filter(lambda x: x[1] is not None, self.faces.items())) - mesh = self.vs, np.concatenate(list(map(lambda x: x[1], faces))) - faces_inds = map(lambda x: - torch.ones(x[1].shape[0], dtype=torch.int64) - if filter_faces(self.gmm[x[0]]) else torch.zeros(x[1].shape[0], dtype=torch.int64), faces) - faces_inds = torch.cat(list(faces_inds)) - # if name != 'mix': - # mesh, faces_inds = self.get_part_face(mesh, faces_inds) - files_utils.export_mesh(mesh, path) - files_utils.export_list(faces_inds.tolist(), f"{path}_faces") - - def 
aggregate_symmetric(self) -> Dict[str, int]: - if not self.symmetric_mode: - return self.votes - out = {} - for item in self.votes: - actor_id = self.addresses_dict[item] - twin = self.gmm[actor_id].twin - out[item] = self.votes[item] - if twin is not None and twin.get_address() not in self.votes: - out[twin.get_address()] = self.votes[item] - return out - - def aggregate_votes(self) -> List[int]: - # to_do = self.add_selection if select else self.clear_selection - actors_id = [] - # votes = self.aggregate_symmetric() - for item in self.votes: - actor_id = self.addresses_dict[item] - actors_id.append(actor_id) - self.votes = {} - return actors_id - - def vote(self, *actors: Optional[vtk.vtkActor]): - for actor in actors: - if actor is not None: - address = actor.GetAddressAsString('') - if address in self.addresses_dict: - if address not in self.votes: - self.votes[address] = 0 - self.votes[address] += 1 - - @staticmethod - def faces_to_vtk_faces(faces: Union[T, ARRAY]): - if type(faces) is T: - faces = faces.detach().cpu().numpy() - cells_npy = np.column_stack( - [np.full(faces.shape[0], 3, dtype=np.int64), faces.astype(np.int64)]).ravel() - faces_vtk = vtk.vtkCellArray() - faces_vtk.SetCells(faces.shape[0], numpy_support.numpy_to_vtkIdTypeArray(cells_npy)) - return faces_vtk - - def get_mesh_part(self, vs: vtk.vtkPoints, faces: Optional[Union[T, ARRAY]]) -> Optional[vtk.vtkPolyData]: - if faces is not None: - # actor_mesh = vtk.vtkActor() - mesh = vtk.vtkPolyData() - # mapper = vtk.vtkPolyDataMapper() - mesh.SetPoints(vs) - mesh.SetPolys(self.faces_to_vtk_faces(faces)) - # mapper.SetInputData(mesh) - # actor_mesh.SetMapper(mapper) - # actor_mesh.GetProperty().SetOpacity(0.3) - # actor_mesh.PickableOff() - # if self.to_init: - # self.render.AddActor(actor_mesh) - return mesh - return None - - def add_gmm(self) -> List[gaussian_status.GaussianStatus]: - gmms = [] - if len(self.raw_gmm) > 0: - phi = self.raw_gmm[0] - phi = np.exp(phi) - phi = phi / phi.sum() - for i, gaussian in enumerate(zip(*self.raw_gmm)): - gaussian = gaussian_status.GaussianStatus(gaussian, (self.gmm_id, i), False, self.view_style, - self.render, phi[i]) - gmms.append(gaussian) - return gmms - - def add_mesh(self, base_mesh: T_Mesh, split_mesh: bool = True, for_slider: bool = True): - if base_mesh is not None: - vs_vtk = vtk.vtkPoints() - self.vs = base_mesh[0] - if for_slider: - vs_ui = self.init_mesh_pos(base_mesh[0]) - else: - vs_ui = self.vs - vs_vtk.SetData(numpy_support.numpy_to_vtk(vs_ui.numpy())) - if split_mesh: - self.faces = self.split_mesh_by_gmm(base_mesh) - for i in range(len(self.gmm)): - part_mesh = self.get_mesh_part(vs_vtk, self.faces[i]) - self.gmm[i].replace_part(part_mesh) - else: - part_mesh = self.get_mesh_part(vs_vtk, base_mesh[1]) - self.gmm[0].replace_part(part_mesh) - - def set_brush(self, is_draw: bool): - self.render.set_brush(is_draw) - - def replace_mesh(self, mesh: Optional[V_Mesh]): - mesh = torch.from_numpy(mesh[0]).float(), torch.from_numpy(mesh[1]).long() - self.add_mesh(mesh, for_slider=False) - # if mesh is None: - # return - # else: - # reduction = 1 - 50000. 
/ mesh[1].shape[0] - # source_ = MeshStage.mesh_to_polydata(mesh) - # source_ = MeshStage.smooth_mesh(source_, ui_utils.SmoothingMethod.Taubin) - # self.decimate_mesh(source_, reduction, out=self.mapper.GetInput()) - # self.is_changed = True - # if not self.to_init: - # self.to_init = True - # self.render.AddActor(self.actor) - - def init_mesh_pos(self, vs: T): - vs = vs.clone() - r_a = rotation_utils.get_rotation_matrix(150, 1, degree=True) - r_b = rotation_utils.get_rotation_matrix(-15, 0, degree=True) - r = torch.from_numpy(np.einsum('km,mn->kn', r_b, r_a)).float() - vs = torch.einsum('ad,nd->na', r, vs) - vs[:, 0] += self.gmm_id * 2 - return vs - - @staticmethod - def mesh_to_polydata(mesh: Union[T_Mesh, V_Mesh], source: Optional[vtk.vtkPolyData] = None) -> vtk.vtkPolyData: - if source is None: - source = vtk.vtkPolyData() - vs, faces = mesh - if type(vs) is T: - vs, faces = vs.detach().cpu().numpy(), faces.detach().cpu().numpy() - new_vs_vtk = numpy_support.numpy_to_vtk(vs) - cells_npy = np.column_stack( - [np.full(faces.shape[0], 3, dtype=np.int64), faces.astype(np.int64)]).ravel() - vs_vtk, faces_vtk = vtk.vtkPoints(), vtk.vtkCellArray() - vs_vtk.SetData(new_vs_vtk) - faces_vtk.SetCells(faces.shape[0], numpy_support.numpy_to_vtkIdTypeArray(cells_npy)) - source.SetPoints(vs_vtk) - source.SetPolys(faces_vtk) - return source - - @property - def included(self): - for g in self.gmm: - if g.included: - return True - return False - - def move_mesh_to_end(self, cycle: int): - self.offset += cycle - vs = None - for i in range(len(self)): - mapper = self.gmm[i].mapper - if mapper is not None and mapper.GetInput() is not None: - vs_vtk = mapper.GetInput().GetPoints() - if vs is None: - vs = numpy_support.vtk_to_numpy(vs_vtk.GetData()) - vs[:, 0] += cycle * 2 - vs_vtk.SetData(numpy_support.numpy_to_vtk(vs)) - - def pick(self, actor_address: str) -> bool: - return actor_address in self.addresses_dict - - def __init__(self, opt: options.Options, shape_path: List[str], render: ui_utils.CanvasRender, render_number: int, - view_style: ui_utils.ViewStyle, to_init=True): - self.view_style = view_style - self.votes = {} - self.shape_id = shape_path[1] - self.gmm_id = render_number - self.render = render - self.symmetric_mode = sum(opt.symmetric) > 0 and False - self.selected = None - self.offset = render_number - # self.arrows = arrows.ArrowManger(render) - if self.shape_id != '-1': - self.base_mesh = files_utils.load_mesh( ''.join(shape_path)) - self.raw_gmm = files_utils.load_gmm(f'{shape_path[0]}/{shape_path[1]}.txt', as_np=True)[:-1] - else: - self.base_mesh = None - self.raw_gmm = [] - self.to_init = to_init - self.is_changed = False - self.gmm: List[gaussian_status.GaussianStatus] = self.add_gmm() - self.vs = self.faces = None - self.add_mesh(self.base_mesh) - self.addresses_dict: Dict[str, int] = {self.gmm[i].get_address(): i for i in range(len(self.gmm))} - if self.symmetric_mode: - for i in range(len(self) // 2): - self.make_twins(self.gmm[i].get_address(), self.gmm[i + len(self) // 2].get_address()) - self.toggle_all() - # if self.raw_gmm: - # gmms = self.get_gmm()[0] - # files_utils.export_gmm(gmms, 0, f"./{render_number}") - - -class GmmStatuses: - - def __len__(self): - return len(self.gmms) - - def switch_arrows(self, arrow_type: ui_utils.Buttons): - self.main_gmm.switch_arrows(arrow_type) - - def turn_gmm_off(self): - self.main_gmm.turn_gmm_off() - - def turn_gmm_on(self): - self.main_gmm.turn_gmm_on() - - def update_gmm(self, button: ui_utils.Buttons, key: str) -> bool: - return 
self.main_gmm.update_gmm(button, key) - - def toggle_symmetric(self, force_include: bool = False): - for gmm in self.gmms: - gmm.toggle_symmetric(force_include) - - def event_manger(self, object_id: str): - for gmm in self.gmms: - if gmm.event_manger(object_id): - return True - return False - - def toggle_inclusion(self, object_id: str): - for gmm in self.gmms: - if gmm.toggle_inclusion(object_id)[0]: - return True - return False - - @property - def main_gmm(self) -> GmmMeshStage: - return self.gmms[0] - - def reset(self): - for gmm in self.gmms: - gmm.reset() - - def set_brush(self, is_draw: bool): - for gmm in self.gmms: - gmm.set_brush(is_draw) - - def move_mesh_to_end(self, ptr: int): - self.gmms[ptr].move_mesh_to_end(len(self)) - - def pick(self, actor_address: str) -> Optional[GmmMeshStage]: - for gmm in self.gmms: - if gmm.pick(actor_address): - return gmm - return None - - def __init__(self, opt: options.Options, shape_paths: List[List[str]], render, view_styles: List[ui_utils.ViewStyle]): - self.gmms = [GmmMeshStage(opt, shape_path, render, i, view_style) for i, (shape_path, view_style) in - enumerate(zip(shape_paths, view_styles))] - - -def to_local(func): - def inner(self: MeshGmmStatuses.TransitionController, mouse_pos: Optional[Tuple[int, int]], *args, **kwargs): - if mouse_pos is not None: - size = self.render.GetRenderWindow().GetScreenSize() - aspect = self.render.GetAspect() - mouse_pos = float(mouse_pos[0]) / size[0] - .5, float(mouse_pos[1]) / size[1] - .5 - mouse_pos = torch.tensor([mouse_pos[0] / aspect[1], mouse_pos[1] / aspect[0]]) - return func(self, mouse_pos, *args, **kwargs) - return inner - - -class MeshGmmStatuses(GmmStatuses): - - def aggregate_votes(self, select: bool): - if self.cur_canvas < len(self.gmms): - stage = self.gmms[self.cur_canvas] - changed = stage.aggregate_votes() - changed = list(filter(lambda x: not stage.gmm[x].disabled and stage.gmm[x].is_selected != select, changed)) - for item in changed: - stage.gmm[item].toggle_selection() - return len(changed) > 0 - - def vote(self, *actors: Optional[vtk.vtkActor]): - self.gmms[self.cur_canvas].vote(*actors) - - def init_draw(self, side: int): - self.cur_canvas = side - - def sort_gmms(self, gmms, included): - order = torch.arange(gmms[0].shape[2]).tolist() - order = sorted(order, key=lambda x: included[x][0] * 100 + included[x][1]) - gmms = [[item[:, :, order[i]] for item in gmms] for i in range(gmms[0].shape[2])] - gmms = [torch.stack([gmms[j][i] for j in range(len(gmms))], dim=2) for i in range(len(gmms[0]))] - return gmms - - def save_light(self, root, gmms): - gmms = self.sort_gmms(*gmms) - save_dict = {'ids': { - gmm.shape_id: [gaussian.gaussian_id[1] for gaussian in gmm.gmm if gaussian.included] - for gmm in self.gmms if gmm.included}, - 'gmm': gmms} - path = f"{root}/{files_utils.get_time_name('light')}" - files_utils.save_pickle(save_dict, path) - - def save(self, root: str, gmms): - # for gmm in self.gmms: - # if gmm.included: - # gmm.save(root) - if len(gmms[0]) > 0: - self.save_light(root, gmms) - - def set_brush(self, is_draw: bool): - super(MeshGmmStatuses, self).set_brush(is_draw) - self.main_gmm.render.set_brush(is_draw) - - def update_mesh(self, res=128): - if self.model_process is not None: - self.model_process.get_mesh(res) - return True - return False - # self.all_info[side] = gaussian_inds - - def request_gmm(self) -> Tuple[TS, T]: - gmm, included = self.main_gmm.get_gmm() - return gmm, included - - def replace_mesh(self): - if self.model_process is not None: - 
self.model_process.replace_mesh() - - def exit(self): - if self.model_process is not None: - self.model_process.exit() - - @property - def main_stage(self) -> GmmMeshStage: - return self.gmms[0] - - @property - def stages(self): - return self.gmms - - class TransitionController: - - @property - def moving_axis(self) -> int: - return {ui_utils.EditDirection.X_Axis: 0, - ui_utils.EditDirection.Y_Axis: 2, - ui_utils.EditDirection.Z_Axis: 1}[self.edit_direction] - - def get_delta_translation(self, mouse_pos: T) -> ARRAY: - delta_3d = np.zeros(3) - axis = self.moving_axis - vec = mouse_pos - self.origin_mouse - delta = torch.einsum('d,d', vec, self.dir_2d[:, axis]) - delta_3d[axis] = delta - return delta_3d - - def get_delta_rotation(self, mouse_pos: T) -> ARRAY: - projections = [] - for pos in (self.origin_mouse, mouse_pos): - vec = pos - self.transition_origin_2d - projection = torch.einsum('d,da->a', vec, self.dir_2d) - projection[self.moving_axis] = 0 - projection = nnf.normalize(projection, p=2, dim=0) - projections.append(projection) - sign = (projections[0][(self.moving_axis + 2) % 3] * projections[1][(self.moving_axis + 1) % 3] - - projections[0][(self.moving_axis + 1) % 3] * projections[1][(self.moving_axis + 2) % 3] ).sign() - angle = (torch.acos(torch.einsum('d,d', *projections)) * sign).item() - return ui_utils.get_rotation_matrix(angle, self.moving_axis) - - def get_delta_scaling(self, mouse_pos: T) -> ARRAY: - raise NotImplementedError - - def toggle_edit_direction(self, direction: ui_utils.EditDirection): - self.edit_direction = direction - - @to_local - def get_transition(self, mouse_pos: Optional[T]) -> ui_utils.Transition: - transition = ui_utils.Transition(self.transition_origin.numpy(), self.transition_type) - if mouse_pos is not None: - if self.transition_type is ui_utils.EditType.Translating: - transition.translation = self.get_delta_translation(mouse_pos) - elif self.transition_type is ui_utils.EditType.Rotating: - transition.rotation = self.get_delta_rotation(mouse_pos) - elif self.transition_type is ui_utils.EditType.Scaling: - transition.rotation = self.get_delta_scaling(mouse_pos) - return transition - - @to_local - def init_transition(self, mouse_pos: Tuple[int, int], transition_origin: T, transition_type: ui_utils.EditType): - transform_mat_vtk = self.camera.GetViewTransformMatrix() - dir_2d = torch.zeros(3, 4) - for i in range(3): - for j in range(4): - dir_2d[i, j] = transform_mat_vtk.GetElement(i, j) - self.transition_origin = transition_origin - transition_origin = torch.tensor(transition_origin.tolist() + [1]) - transition_origin_2d = torch.einsum('ab,b->a', dir_2d, transition_origin) - self.transition_origin_2d = transition_origin_2d[:2] / transition_origin_2d[-1].abs() - # print(f"<{self.transition_origin[0]}, {self.transition_origin[1]}>") - # print(mouse_pos) - self.origin_mouse, self.dir_2d = mouse_pos, nnf.normalize(dir_2d[:2, :3], p=2, dim=1) - self.transition_type = transition_type - - @property - def camera(self): - return self.render.GetActiveCamera() - - def __init__(self, render: ui_utils.CanvasRender): - self.render = render - self.transition_origin = torch.zeros(3) - self.transition_origin_2d = torch.zeros(2) - self.origin_mouse, self.dir_2d = torch.zeros(2), torch.zeros(2, 3) - self.edit_direction = ui_utils.EditDirection.X_Axis - self.transition_type = ui_utils.EditType.Translating - - @property - def selected_gaussians(self) -> Iterable[gaussian_status.GaussianStatus]: - return filter(lambda x: x.is_selected, self.main_stage.gmm) - - def 
temporary_transition(self, mouse_pos: Optional[Tuple[int, int]] = None, end=False) -> bool: - transition = self.transition_controller.get_transition(mouse_pos) - is_change = False - for gaussian in self.selected_gaussians: - if end: - is_change = gaussian.end_transition(transition) or is_change - else: - is_change = gaussian.temporary_transition(transition) or is_change - return is_change - - def end_transition(self, mouse_pos: Optional[Tuple[int, int]]) -> bool: - return self.temporary_transition(mouse_pos, True) - - def init_transition(self, mouse_pos, transition_type: ui_utils.EditType): - center = list(map(lambda x: x.mu_baked, self.selected_gaussians)) - if len(center) == 0: - return - # center = torch.from_numpy(np.stack(center, axis=0).mean(0)) - center = torch.zeros(3) - self.transition_controller.init_transition(mouse_pos, center, transition_type) - - def toggle_edit_direction(self, direction: ui_utils.EditDirection): - self.transition_controller.toggle_edit_direction(direction) - - def clear_selection(self) -> bool: - is_changed = False - for gaussian in self.selected_gaussians: - gaussian.toggle_selection() - is_changed = True - return is_changed - - def __init__(self, opt: options.Options, shape_paths: List[List[str]], render, view_styles: List[ui_utils.ViewStyle], - with_model: bool): - super(MeshGmmStatuses, self).__init__(opt, shape_paths, render, view_styles) - if with_model: - self.model_process = inference_processing.InferenceProcess(opt, self.main_stage.replace_mesh, - self.request_gmm, - shape_paths) - else: - self.model_process = None - self.counter = 0 - self.cur_canvas = 0 - self.transition_controller = MeshGmmStatuses.TransitionController(self.main_stage.render) - - -class MeshGmmUnited(MeshGmmStatuses): - - def save(self, root: str): - super(MeshGmmUnited, self).save(root) - self.main_gmm.save(root, filter_by_selection) - - def aggregate_votes(self, select: bool): - if self.cur_canvas < len(self.gmms): - stage = self.gmms[self.cur_canvas] - changed = stage.aggregate_votes() - changed = list(filter(lambda x: not stage.gmm[x].disabled and stage.gmm[x].included != select, changed)) - for item in changed: - is_toggled, toggled = stage.toggle_inclusion_by_id(item, select) - if is_toggled: - if toggled[0].included: - new_addresses = self.main_gmm.add_gaussians(toggled) - for gaussian, new_address in zip(toggled, new_addresses): - self.stage_mapper[gaussian.get_address()] = new_address - self.make_twins(toggled, new_addresses) - else: - addresses = [gaussian.get_address() for gaussian in toggled] - addresses = list(filter(lambda x: x in self.stage_mapper, addresses)) - self.main_gmm.remove_gaussians([self.stage_mapper[address] for address in addresses]) - for address in addresses: - del self.stage_mapper[address] - return len(changed) > 0 - else: - return self.update_selection(select) - - def update_selection(self, select: bool): - changed = self.main_stage.aggregate_votes() - changed = filter(lambda x: self.main_stage.gmm[x].is_selected != select, changed) - for item in changed: - self.main_stage.gmm[item].toggle_selection() - return False - - def vote(self, *actors: Optional[vtk.vtkActor]): - if self.cur_canvas < len(self.gmms): - self.gmms[self.cur_canvas].vote(*actors) - else: - self.main_gmm.vote(*actors) - - def reset(self): - super(MeshGmmUnited, self).reset() - self.main_gmm.remove_all() - for gmm in self.gmms: - gmm.toggle_all() - self.stage_mapper = {} - - def event_manger(self, object_id: str): - return self.toggle_inclusion(object_id) or 
self.main_gmm.event_manger(object_id) - - def make_twins(self, toggled: List[gaussian_status.GaussianStatus], new_addresses : List[str]): - if len(new_addresses) == 2: - self.main_gmm.make_twins(*new_addresses) - else: - if toggled[0].twin is not None and toggled[0].twin.get_address() in self.stage_mapper: - self.main_gmm.make_twins(new_addresses[0], self.stage_mapper[toggled[0].twin.get_address()]) - - def toggle_symmetric(self, force_include: bool = False): - super(MeshGmmUnited, self).toggle_symmetric(force_include) - self.main_gmm.toggle_symmetric(force_include) - - @property - def main_gmm(self) -> GmmMeshStage: - return self.main_gmm_ - - @property - def main_stage(self) -> GmmMeshStage: - return self.main_gmm_ - - def __init__(self, opt: options.Options, gmm_paths: List[int], renders_right, view_styles: List[ui_utils.ViewStyle], - main_render: ui_utils.CanvasRender, with_model: bool): - self.main_gmm_ = GmmMeshStage(opt, -1, main_render, len(gmm_paths), view_styles[-1], to_init=False) - super(MeshGmmUnited, self).__init__(opt, gmm_paths, renders_right, view_styles[:-1], with_model) - self.main_render = main_render - self.reset() - self.stage_mapper: Dict[str, str] = {} - - -def main(): - opt = options.Options(tag="chairs_sym_hard").load() - model = train_utils.model_lc(opt)[0] - model = model.to(CPU) - colors = torch.rand(opt.num_gaussians, 3) - shape_nums = 1103, 1637, 2954, 3631, 4814 - for shape_num in shape_nums: - mesh = files_utils.load_mesh(f"{opt.cp_folder}/occ/samples_{shape_num}") - gmm = files_utils.load_gmm(f"{opt.cp_folder}/gmms/samples_{shape_num}") - vs, faces = mesh - phi, mu, eigen, p, _ = [item.unsqueeze(0).unsqueeze(0) for item in gmm] - gmm = mu, p, phi, eigen - attention = model.get_attention(vs.unsqueeze(0), torch.tensor([shape_num], dtype=torch.int64))[-4:] - # _, supports = gm_utils.hierarchical_gm_log_likelihood_loss([gmm], vs.unsqueeze(0), get_supports=True) - # supports = supports[0][0] - supports = torch.cat(attention, dim=0) - supports = supports.mean(-1).mean(0) - label = supports.argmax(1) - colors_ = colors[label] - files_utils.export_mesh((vs, faces), f"{constants.OUT_ROOT}/{opt.tag}_{shape_num}b", colors=colors_) - return 0 - - -if __name__ == '__main__': - - from utils import train_utils - exit(main()) diff --git a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/synthesizer/utils/plot.py b/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/synthesizer/utils/plot.py deleted file mode 100644 index efdb5670b4f26f2110988e818ff8ad9ff7238cef..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/synthesizer/utils/plot.py +++ /dev/null @@ -1,115 +0,0 @@ -import matplotlib -matplotlib.use("Agg") -import matplotlib.pyplot as plt -import numpy as np - - -def split_title_line(title_text, max_words=5): - """ - A function that splits any string based on specific character - (returning it with the string), with maximum number of words on it - """ - seq = title_text.split() - return "\n".join([" ".join(seq[i:i + max_words]) for i in range(0, len(seq), max_words)]) - -def plot_alignment(alignment, path, title=None, split_title=False, max_len=None): - if max_len is not None: - alignment = alignment[:, :max_len] - - fig = plt.figure(figsize=(8, 6)) - ax = fig.add_subplot(111) - - im = ax.imshow( - alignment, - aspect="auto", - origin="lower", - interpolation="none") - fig.colorbar(im, ax=ax) - xlabel = "Decoder timestep" - - if split_title: - title = split_title_line(title) - - plt.xlabel(xlabel) - 
plt.title(title) - plt.ylabel("Encoder timestep") - plt.tight_layout() - plt.savefig(path, format="png") - plt.close() - - -def plot_spectrogram(pred_spectrogram, path, title=None, split_title=False, target_spectrogram=None, max_len=None, auto_aspect=False): - if max_len is not None: - target_spectrogram = target_spectrogram[:max_len] - pred_spectrogram = pred_spectrogram[:max_len] - - if split_title: - title = split_title_line(title) - - fig = plt.figure(figsize=(10, 8)) - # Set common labels - fig.text(0.5, 0.18, title, horizontalalignment="center", fontsize=16) - - #target spectrogram subplot - if target_spectrogram is not None: - ax1 = fig.add_subplot(311) - ax2 = fig.add_subplot(312) - - if auto_aspect: - im = ax1.imshow(np.rot90(target_spectrogram), aspect="auto", interpolation="none") - else: - im = ax1.imshow(np.rot90(target_spectrogram), interpolation="none") - ax1.set_title("Target Mel-Spectrogram") - fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax1) - ax2.set_title("Predicted Mel-Spectrogram") - else: - ax2 = fig.add_subplot(211) - - if auto_aspect: - im = ax2.imshow(np.rot90(pred_spectrogram), aspect="auto", interpolation="none") - else: - im = ax2.imshow(np.rot90(pred_spectrogram), interpolation="none") - fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax2) - - plt.tight_layout() - plt.savefig(path, format="png") - plt.close() - - -def plot_spectrogram_and_trace(pred_spectrogram, path, title=None, split_title=False, target_spectrogram=None, max_len=None, auto_aspect=False, sw=None, step=0): - if max_len is not None: - target_spectrogram = target_spectrogram[:max_len] - pred_spectrogram = pred_spectrogram[:max_len] - - if split_title: - title = split_title_line(title) - - fig = plt.figure(figsize=(10, 8)) - # Set common labels - fig.text(0.5, 0.18, title, horizontalalignment="center", fontsize=16) - - #target spectrogram subplot - if target_spectrogram is not None: - ax1 = fig.add_subplot(311) - ax2 = fig.add_subplot(312) - - if auto_aspect: - im = ax1.imshow(np.rot90(target_spectrogram), aspect="auto", interpolation="none") - else: - im = ax1.imshow(np.rot90(target_spectrogram), interpolation="none") - ax1.set_title("Target Mel-Spectrogram") - fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax1) - ax2.set_title("Predicted Mel-Spectrogram") - else: - ax2 = fig.add_subplot(211) - - if auto_aspect: - im = ax2.imshow(np.rot90(pred_spectrogram), aspect="auto", interpolation="none") - else: - im = ax2.imshow(np.rot90(pred_spectrogram), interpolation="none") - fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax2) - - plt.tight_layout() - plt.savefig(path, format="png") - sw.add_figure("spectrogram", fig, step) - plt.close() \ No newline at end of file diff --git a/spaces/Kimata/Sanskrit-TTS/commons.py b/spaces/Kimata/Sanskrit-TTS/commons.py deleted file mode 100644 index 2153153f527d94e2abb641ea00c80b518ff6c5bd..0000000000000000000000000000000000000000 --- a/spaces/Kimata/Sanskrit-TTS/commons.py +++ /dev/null @@ -1,97 +0,0 @@ -import math -import torch -from torch.nn import functional as F -import torch.jit - - -def script_method(fn, _rcb=None): - return fn - - -def script(obj, optimize=True, _frames_up=0, _rcb=None): - return obj - - -torch.jit.script_method = script_method -torch.jit.script = script - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - 
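# "same" padding for a dilated 1-D convolution: dilation * (kernel_size - 1) / 2,
# e.g. kernel_size=3, dilation=1 -> 1; kernel_size=5, dilation=2 -> 4.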
return int((kernel_size*dilation - dilation)/2) - - -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2,3) * mask - return path diff --git a/spaces/Kirihasan/rvc-holo/infer_pack/attentions.py b/spaces/Kirihasan/rvc-holo/infer_pack/attentions.py deleted file mode 100644 index 77cb63ffccf3e33badf22d50862a64ba517b487f..0000000000000000000000000000000000000000 --- a/spaces/Kirihasan/rvc-holo/infer_pack/attentions.py +++ /dev/null @@ -1,417 +0,0 @@ -import copy -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -from infer_pack import commons -from infer_pack import modules -from infer_pack.modules import LayerNorm - - -class Encoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - window_size=10, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - window_size=window_size, - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - ) - ) - 
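# second LayerNorm of the layer: forward() applies it to the FFN residual branch,
# mirroring norm_layers_1 for the self-attention branch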
self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - proximal_bias=False, - proximal_init=True, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - proximal_bias=proximal_bias, - proximal_init=proximal_init, - ) - ) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append( - MultiHeadAttention( - hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - causal=True, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to( - device=x.device, dtype=x.dtype - ) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__( - self, - channels, - out_channels, - n_heads, - p_dropout=0.0, - window_size=None, - heads_share=True, - block_length=None, - proximal_bias=False, - proximal_init=False, - ): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter( - 
torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - self.emb_rel_v = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert ( - t_s == t_t - ), "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys( - query / math.sqrt(self.k_channels), key_relative_embeddings - ) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to( - device=scores.device, dtype=scores.dtype - ) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert ( - t_s == t_t - ), "Local attention is only available for self-attention." - block_mask = ( - torch.ones_like(scores) - .triu(-self.block_length) - .tril(self.block_length) - ) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings( - self.emb_rel_v, t_s - ) - output = output + self._matmul_with_relative_values( - relative_weights, value_relative_embeddings - ) - output = ( - output.transpose(2, 3).contiguous().view(b, d, t_t) - ) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. 
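# The table covers 2 * window_size + 1 relative offsets; padding both sides by
# max(length - (window_size + 1), 0) lets a single slice of width 2 * length - 1
# be taken for any length (e.g. window_size=4: length=6 -> pad 1, slice [0:11];
# length=3 -> pad 0, slice [2:7]).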
- pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]), - ) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[ - :, slice_start_position:slice_end_position - ] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad( - x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]) - ) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[ - :, :, :length, length - 1 : - ] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad( - x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]) - ) - x_flat = x.view([batch, heads, length**2 + length * (length - 1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. 
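The bias at position (i, j) is -log(1 + |i - j|), so attention to nearby positions is favored and the penalty grows only logarithmically with distance.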
- Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__( - self, - in_channels, - out_channels, - filter_channels, - kernel_size, - p_dropout=0.0, - activation=None, - causal=False, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/Kreaols/ChuanhuChatGPT/run_macOS.command b/spaces/Kreaols/ChuanhuChatGPT/run_macOS.command deleted file mode 100644 index 2d26597ae47519f42336ccffc16646713a192ae1..0000000000000000000000000000000000000000 --- a/spaces/Kreaols/ChuanhuChatGPT/run_macOS.command +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash - -# 获取脚本所在目录 -script_dir=$(dirname "$(readlink -f "$0")") - -# 将工作目录更改为脚本所在目录 -cd "$script_dir" || exit - -# 检查Git仓库是否有更新 -git remote update -pwd - -if ! git status -uno | grep 'up to date' > /dev/null; then - # 如果有更新,关闭当前运行的服务器 - pkill -f ChuanhuChatbot.py - - # 拉取最新更改 - git pull - - # 安装依赖 - pip3 install -r requirements.txt - - # 重新启动服务器 - nohup python3 ChuanhuChatbot.py & -fi - -# 检查ChuanhuChatbot.py是否在运行 -if ! pgrep -f ChuanhuChatbot.py > /dev/null; then - # 如果没有运行,启动服务器 - nohup python3 ChuanhuChatbot.py & -fi diff --git a/spaces/KyanChen/RSPrompter/mmpretrain/apis/visual_question_answering.py b/spaces/KyanChen/RSPrompter/mmpretrain/apis/visual_question_answering.py deleted file mode 100644 index 2d056758f39f780c6863ae54b09b8f0cc725ade4..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmpretrain/apis/visual_question_answering.py +++ /dev/null @@ -1,181 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from pathlib import Path -from typing import Callable, List, Optional, Union - -import numpy as np -from mmcv.image import imread -from mmengine.config import Config -from mmengine.dataset import Compose, default_collate - -from mmpretrain.registry import TRANSFORMS -from mmpretrain.structures import DataSample -from .base import BaseInferencer -from .model import list_models - - -class VisualQuestionAnsweringInferencer(BaseInferencer): - """The inferencer for visual question answering. - - Args: - model (BaseModel | str | Config): A model name or a path to the config - file, or a :obj:`BaseModel` object. 
The model name can be found - by ``VisualQuestionAnsweringInferencer.list_models()`` and you can - also query it in :doc:`/modelzoo_statistics`. - pretrained (str, optional): Path to the checkpoint. If None, it will - try to find a pre-defined weight from the model you specified - (only work if the ``model`` is a model name). Defaults to None. - device (str, optional): Device to run inference. If None, the available - device will be automatically used. Defaults to None. - **kwargs: Other keyword arguments to initialize the model (only work if - the ``model`` is a model name). - - Example: - >>> from mmpretrain import VisualQuestionAnsweringInferencer - >>> inferencer = VisualQuestionAnsweringInferencer('ofa-base_3rdparty-zeroshot_vqa') - >>> inferencer('demo/cat-dog.png', "What's the animal next to the dog?")[0] - {'question': "What's the animal next to the dog?", 'pred_answer': 'cat'} - """ # noqa: E501 - - visualize_kwargs: set = {'resize', 'show', 'show_dir', 'wait_time'} - - def __call__(self, - images: Union[str, np.ndarray, list], - questions: Union[str, list], - return_datasamples: bool = False, - batch_size: int = 1, - objects: Optional[List[str]] = None, - **kwargs) -> dict: - """Call the inferencer. - - Args: - images (str | array | list): The image path or array, or a list of - images. - questions (str | list): The question to the correspondding image. - return_datasamples (bool): Whether to return results as - :obj:`DataSample`. Defaults to False. - batch_size (int): Batch size. Defaults to 1. - objects (List[List[str]], optional): Some algorithms like OFA - fine-tuned VQA models requires extra object description list - for every image. Defaults to None. - resize (int, optional): Resize the short edge of the image to the - specified length before visualization. Defaults to None. - show (bool): Whether to display the visualization result in a - window. Defaults to False. - wait_time (float): The display time (s). Defaults to 0, which means - "forever". - show_dir (str, optional): If not None, save the visualization - results in the specified directory. Defaults to None. - - Returns: - list: The inference results. - """ - if not isinstance(images, (list, tuple)): - assert isinstance(questions, str) - inputs = [{'img': images, 'question': questions}] - if objects is not None: - assert isinstance(objects[0], str) - inputs[0]['objects'] = objects - else: - inputs = [] - for i in range(len(images)): - input_ = {'img': images[i], 'question': questions[i]} - if objects is not None: - input_['objects'] = objects[i] - inputs.append(input_) - - return super().__call__(inputs, return_datasamples, batch_size, - **kwargs) - - def _init_pipeline(self, cfg: Config) -> Callable: - test_pipeline_cfg = cfg.test_dataloader.dataset.pipeline - if test_pipeline_cfg[0]['type'] == 'LoadImageFromFile': - # Image loading is finished in `self.preprocess`. 
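# so the LoadImageFromFile transform is dropped and only the remaining transforms are built below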
- test_pipeline_cfg = test_pipeline_cfg[1:] - test_pipeline = Compose( - [TRANSFORMS.build(t) for t in test_pipeline_cfg]) - return test_pipeline - - def preprocess(self, inputs: List[dict], batch_size: int = 1): - - def load_image(input_: dict): - img = imread(input_['img']) - if img is None: - raise ValueError(f'Failed to read image {input_}.') - return {**input_, 'img': img} - - pipeline = Compose([load_image, self.pipeline]) - - chunked_data = self._get_chunk_data(map(pipeline, inputs), batch_size) - yield from map(default_collate, chunked_data) - - def visualize(self, - ori_inputs: List[dict], - preds: List[DataSample], - show: bool = False, - wait_time: int = 0, - resize: Optional[int] = None, - show_dir=None): - if not show and show_dir is None: - return None - - if self.visualizer is None: - from mmpretrain.visualization import UniversalVisualizer - self.visualizer = UniversalVisualizer() - - visualization = [] - for i, (input_, data_sample) in enumerate(zip(ori_inputs, preds)): - image = imread(input_['img']) - if isinstance(input_['img'], str): - # The image loaded from path is BGR format. - image = image[..., ::-1] - name = Path(input_['img']).stem - else: - name = str(i) - - if show_dir is not None: - show_dir = Path(show_dir) - show_dir.mkdir(exist_ok=True) - out_file = str((show_dir / name).with_suffix('.png')) - else: - out_file = None - - self.visualizer.visualize_vqa( - image, - data_sample, - resize=resize, - show=show, - wait_time=wait_time, - name=name, - out_file=out_file) - visualization.append(self.visualizer.get_image()) - if show: - self.visualizer.close() - return visualization - - def postprocess(self, - preds: List[DataSample], - visualization: List[np.ndarray], - return_datasamples=False) -> dict: - if return_datasamples: - return preds - - results = [] - for data_sample in preds: - results.append({ - 'question': data_sample.get('question'), - 'pred_answer': data_sample.get('pred_answer'), - }) - - return results - - @staticmethod - def list_models(pattern: Optional[str] = None): - """List all available model names. - - Args: - pattern (str | None): A wildcard pattern to match model names. - - Returns: - List[str]: a list of model names. - """ - return list_models(pattern=pattern, task='Visual Question Answering') diff --git a/spaces/LanguageBind/LanguageBind/languagebind/audio/configuration_audio.py b/spaces/LanguageBind/LanguageBind/languagebind/audio/configuration_audio.py deleted file mode 100644 index 865a496cff50fbac855413220288cd996965468b..0000000000000000000000000000000000000000 --- a/spaces/LanguageBind/LanguageBind/languagebind/audio/configuration_audio.py +++ /dev/null @@ -1,430 +0,0 @@ -import copy -import os -from typing import Union - -from transformers import PretrainedConfig -from transformers.utils import logging - -logger = logging.get_logger(__name__) - - - - - - - -class CLIPTextConfig(PretrainedConfig): - r""" - This is the configuration class to store the configuration of a [`CLIPTextModel`]. It is used to instantiate a CLIP - text encoder according to the specified arguments, defining the model architecture. Instantiating a configuration - with the defaults will yield a similar configuration to that of the text encoder of the CLIP - [openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) architecture. - - Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the - documentation from [`PretrainedConfig`] for more information. 
- - Args: - vocab_size (`int`, *optional*, defaults to 49408): - Vocabulary size of the CLIP text model. Defines the number of different tokens that can be represented by - the `inputs_ids` passed when calling [`CLIPModel`]. - hidden_size (`int`, *optional*, defaults to 512): - Dimensionality of the encoder layers and the pooler layer. - intermediate_size (`int`, *optional*, defaults to 2048): - Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. - num_hidden_layers (`int`, *optional*, defaults to 12): - Number of hidden layers in the Transformer encoder. - num_attention_heads (`int`, *optional*, defaults to 8): - Number of attention heads for each attention layer in the Transformer encoder. - max_position_embeddings (`int`, *optional*, defaults to 77): - The maximum sequence length that this model might ever be used with. Typically set this to something large - just in case (e.g., 512 or 1024 or 2048). - hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): - The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, - `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported. - layer_norm_eps (`float`, *optional*, defaults to 1e-5): - The epsilon used by the layer normalization layers. - attention_dropout (`float`, *optional*, defaults to 0.0): - The dropout ratio for the attention probabilities. - initializer_range (`float`, *optional*, defaults to 0.02): - The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - initializer_factor (`float`, *optional*, defaults to 1): - A factor for initializing all weight matrices (should be kept to 1, used internally for initialization - testing). - - Example: - - ```python - >>> from transformers import CLIPTextConfig, CLIPTextModel - - >>> # Initializing a CLIPTextConfig with openai/clip-vit-base-patch32 style configuration - >>> configuration = CLIPTextConfig() - - >>> # Initializing a CLIPTextModel (with random weights) from the openai/clip-vit-base-patch32 style configuration - >>> model = CLIPTextModel(configuration) - - >>> # Accessing the model configuration - >>> configuration = model.config - ```""" - model_type = "clip_text_model" - - def __init__( - self, - vocab_size=49408, - hidden_size=512, - intermediate_size=2048, - projection_dim=512, - num_hidden_layers=12, - num_attention_heads=8, - max_position_embeddings=77, - hidden_act="quick_gelu", - layer_norm_eps=1e-5, - attention_dropout=0.0, - initializer_range=0.02, - initializer_factor=1.0, - # This differs from `CLIPTokenizer`'s default and from openai/clip - # See https://github.com/huggingface/transformers/pull/24773#issuecomment-1632287538 - pad_token_id=1, - bos_token_id=49406, - eos_token_id=49407, - **kwargs, - ): - super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) - - self.vocab_size = vocab_size - self.hidden_size = hidden_size - self.intermediate_size = intermediate_size - self.projection_dim = projection_dim - self.num_hidden_layers = num_hidden_layers - self.num_attention_heads = num_attention_heads - self.max_position_embeddings = max_position_embeddings - self.layer_norm_eps = layer_norm_eps - self.hidden_act = hidden_act - self.initializer_range = initializer_range - self.initializer_factor = initializer_factor - self.attention_dropout = attention_dropout - self.add_time_attn = False ###################################### - - @classmethod - def 
from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig": - cls._set_token_in_kwargs(kwargs) - - config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) - - # get the text config dict if we are loading from CLIPConfig - if config_dict.get("model_type") == "clip": - config_dict = config_dict["text_config"] - - if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: - logger.warning( - f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " - f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." - ) - - return cls.from_dict(config_dict, **kwargs) - - - - -class CLIPVisionConfig(PretrainedConfig): - r""" - This is the configuration class to store the configuration of a [`CLIPVisionModel`]. It is used to instantiate a - CLIP vision encoder according to the specified arguments, defining the model architecture. Instantiating a - configuration with the defaults will yield a similar configuration to that of the vision encoder of the CLIP - [openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) architecture. - - Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the - documentation from [`PretrainedConfig`] for more information. - - Args: - hidden_size (`int`, *optional*, defaults to 768): - Dimensionality of the encoder layers and the pooler layer. - intermediate_size (`int`, *optional*, defaults to 3072): - Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. - num_hidden_layers (`int`, *optional*, defaults to 12): - Number of hidden layers in the Transformer encoder. - num_attention_heads (`int`, *optional*, defaults to 12): - Number of attention heads for each attention layer in the Transformer encoder. - image_size (`int`, *optional*, defaults to 224): - The size (resolution) of each image. - patch_size (`int`, *optional*, defaults to 32): - The size (resolution) of each patch. - hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): - The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, - `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported. - layer_norm_eps (`float`, *optional*, defaults to 1e-5): - The epsilon used by the layer normalization layers. - attention_dropout (`float`, *optional*, defaults to 0.0): - The dropout ratio for the attention probabilities. - initializer_range (`float`, *optional*, defaults to 0.02): - The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - initializer_factor (`float`, *optional*, defaults to 1): - A factor for initializing all weight matrices (should be kept to 1, used internally for initialization - testing). 
- - Example: - - ```python - >>> from transformers import CLIPVisionConfig, CLIPVisionModel - - >>> # Initializing a CLIPVisionConfig with openai/clip-vit-base-patch32 style configuration - >>> configuration = CLIPVisionConfig() - - >>> # Initializing a CLIPVisionModel (with random weights) from the openai/clip-vit-base-patch32 style configuration - >>> model = CLIPVisionModel(configuration) - - >>> # Accessing the model configuration - >>> configuration = model.config - ```""" - - model_type = "clip_vision_model" - - def __init__( - self, - hidden_size=768, - intermediate_size=3072, - projection_dim=512, - num_hidden_layers=12, - num_attention_heads=12, - num_channels=3, - image_size=224, - patch_size=32, - hidden_act="quick_gelu", - layer_norm_eps=1e-5, - attention_dropout=0.0, - initializer_range=0.02, - initializer_factor=1.0, - - add_time_attn=False, ################################ - num_frames=1, ################################ - force_patch_dropout=0.0, ################################ - lora_r=2, ################################ - lora_alpha=16, ################################ - lora_dropout=0.0, ################################ - num_mel_bins=0.0, ################################ - target_length=0.0, ################################ - video_decode_backend='decord', ######################### - audio_sample_rate=16000, - audio_mean=0.5, - audio_std=0.5, - **kwargs, - ): - super().__init__(**kwargs) - - self.hidden_size = hidden_size - self.intermediate_size = intermediate_size - self.projection_dim = projection_dim - self.num_hidden_layers = num_hidden_layers - self.num_attention_heads = num_attention_heads - self.num_channels = num_channels - self.patch_size = patch_size - self.image_size = image_size - self.initializer_range = initializer_range - self.initializer_factor = initializer_factor - self.attention_dropout = attention_dropout - self.layer_norm_eps = layer_norm_eps - self.hidden_act = hidden_act - - self.add_time_attn = add_time_attn ################ - self.num_frames = num_frames ################ - self.force_patch_dropout = force_patch_dropout ################ - self.lora_r = lora_r ################ - self.lora_alpha = lora_alpha ################ - self.lora_dropout = lora_dropout ################ - self.num_mel_bins = num_mel_bins ################ - self.target_length = target_length ################ - self.video_decode_backend = video_decode_backend ################ - - self.audio_sample_rate = audio_sample_rate - self.audio_mean = audio_mean - self.audio_std = audio_std - - @classmethod - def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig": - cls._set_token_in_kwargs(kwargs) - - config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) - - # get the vision config dict if we are loading from CLIPConfig - if config_dict.get("model_type") == "clip": - config_dict = config_dict["vision_config"] - - if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: - logger.warning( - f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " - f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." - ) - - return cls.from_dict(config_dict, **kwargs) - - -class LanguageBindAudioConfig(PretrainedConfig): - r""" - [`CLIPConfig`] is the configuration class to store the configuration of a [`CLIPModel`]. 
It is used to instantiate - a CLIP model according to the specified arguments, defining the text model and vision model configs. Instantiating - a configuration with the defaults will yield a similar configuration to that of the CLIP - [openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) architecture. - - Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the - documentation from [`PretrainedConfig`] for more information. - - Args: - text_config (`dict`, *optional*): - Dictionary of configuration options used to initialize [`CLIPTextConfig`]. - vision_config (`dict`, *optional*): - Dictionary of configuration options used to initialize [`CLIPVisionConfig`]. - projection_dim (`int`, *optional*, defaults to 512): - Dimentionality of text and vision projection layers. - logit_scale_init_value (`float`, *optional*, defaults to 2.6592): - The inital value of the *logit_scale* paramter. Default is used as per the original CLIP implementation. - kwargs (*optional*): - Dictionary of keyword arguments. - - Example: - - ```python - >>> from transformers import CLIPConfig, CLIPModel - - >>> # Initializing a CLIPConfig with openai/clip-vit-base-patch32 style configuration - >>> configuration = CLIPConfig() - - >>> # Initializing a CLIPModel (with random weights) from the openai/clip-vit-base-patch32 style configuration - >>> model = CLIPModel(configuration) - - >>> # Accessing the model configuration - >>> configuration = model.config - - >>> # We can also initialize a CLIPConfig from a CLIPTextConfig and a CLIPVisionConfig - >>> from transformers import CLIPTextConfig, CLIPVisionConfig - - >>> # Initializing a CLIPText and CLIPVision configuration - >>> config_text = CLIPTextConfig() - >>> config_vision = CLIPVisionConfig() - - >>> config = CLIPConfig.from_text_vision_configs(config_text, config_vision) - ```""" - - model_type = "LanguageBindAudio" - is_composition = True - - def __init__( - self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, **kwargs - ): - # If `_config_dict` exist, we use them for the backward compatibility. - # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot - # of confusion!). - text_config_dict = kwargs.pop("text_config_dict", None) - vision_config_dict = kwargs.pop("vision_config_dict", None) - - super().__init__(**kwargs) - - # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in - # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most - # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. - if text_config_dict is not None: - if text_config is None: - text_config = {} - - # This is the complete result when using `text_config_dict`. - _text_config_dict = CLIPTextConfig(**text_config_dict).to_dict() - - # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different. - for key, value in _text_config_dict.items(): - if key in text_config and value != text_config[key] and key not in ["transformers_version"]: - # If specified in `text_config_dict` - if key in text_config_dict: - message = ( - f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. " - f'The value `text_config_dict["{key}"]` will be used instead.' 
- ) - # If inferred from default argument values (just to be super careful) - else: - message = ( - f"`text_config_dict` is provided which will be used to initialize `CLIPTextConfig`. The " - f'value `text_config["{key}"]` will be overriden.' - ) - logger.warning(message) - - # Update all values in `text_config` with the ones in `_text_config_dict`. - text_config.update(_text_config_dict) - - if vision_config_dict is not None: - if vision_config is None: - vision_config = {} - - # This is the complete result when using `vision_config_dict`. - _vision_config_dict = CLIPVisionConfig(**vision_config_dict).to_dict() - # convert keys to string instead of integer - if "id2label" in _vision_config_dict: - _vision_config_dict["id2label"] = { - str(key): value for key, value in _vision_config_dict["id2label"].items() - } - - # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different. - for key, value in _vision_config_dict.items(): - if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]: - # If specified in `vision_config_dict` - if key in vision_config_dict: - message = ( - f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different " - f'values. The value `vision_config_dict["{key}"]` will be used instead.' - ) - # If inferred from default argument values (just to be super careful) - else: - message = ( - f"`vision_config_dict` is provided which will be used to initialize `CLIPVisionConfig`. " - f'The value `vision_config["{key}"]` will be overriden.' - ) - logger.warning(message) - - # Update all values in `vision_config` with the ones in `_vision_config_dict`. - vision_config.update(_vision_config_dict) - - if text_config is None: - text_config = {} - logger.info("`text_config` is `None`. Initializing the `CLIPTextConfig` with default values.") - - if vision_config is None: - vision_config = {} - logger.info("`vision_config` is `None`. initializing the `CLIPVisionConfig` with default values.") - - self.text_config = CLIPTextConfig(**text_config) - self.vision_config = CLIPVisionConfig(**vision_config) - - self.projection_dim = projection_dim - self.logit_scale_init_value = logit_scale_init_value - self.initializer_factor = 1.0 - - @classmethod - def from_text_vision_configs(cls, text_config: CLIPTextConfig, vision_config: CLIPVisionConfig, **kwargs): - r""" - Instantiate a [`CLIPConfig`] (or a derived class) from clip text model configuration and clip vision model - configuration. - - Returns: - [`CLIPConfig`]: An instance of a configuration object - """ - - return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs) - - def to_dict(self): - """ - Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`]. 
- - Returns: - `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance, - """ - output = copy.deepcopy(self.__dict__) - output["text_config"] = self.text_config.to_dict() - output["vision_config"] = self.vision_config.to_dict() - output["model_type"] = self.__class__.model_type - return output - - - - - - - - - - diff --git a/spaces/LittleYuan/My-Real-Bot/tests/test_discriminator_arch.py b/spaces/LittleYuan/My-Real-Bot/tests/test_discriminator_arch.py deleted file mode 100644 index c56a40c7743630aa63b3e99bca8dc1a85949c4c5..0000000000000000000000000000000000000000 --- a/spaces/LittleYuan/My-Real-Bot/tests/test_discriminator_arch.py +++ /dev/null @@ -1,19 +0,0 @@ -import torch - -from realesrgan.archs.discriminator_arch import UNetDiscriminatorSN - - -def test_unetdiscriminatorsn(): - """Test arch: UNetDiscriminatorSN.""" - - # model init and forward (cpu) - net = UNetDiscriminatorSN(num_in_ch=3, num_feat=4, skip_connection=True) - img = torch.rand((1, 3, 32, 32), dtype=torch.float32) - output = net(img) - assert output.shape == (1, 1, 32, 32) - - # model init and forward (gpu) - if torch.cuda.is_available(): - net.cuda() - output = net(img.cuda()) - assert output.shape == (1, 1, 32, 32) diff --git a/spaces/LongBeattz/runwayml-stable-diffusion-v1-5/README.md b/spaces/LongBeattz/runwayml-stable-diffusion-v1-5/README.md deleted file mode 100644 index de176b1ad84ffd972670d6e5a0005fdc514f7202..0000000000000000000000000000000000000000 --- a/spaces/LongBeattz/runwayml-stable-diffusion-v1-5/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Runwayml Stable Diffusion V1 5 -emoji: 😻 -colorFrom: indigo -colorTo: yellow -sdk: gradio -sdk_version: 3.13.0 -app_file: app.py -pinned: false -license: other ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Loren/Streamlit_OCR_comparator/app_pages/home.py b/spaces/Loren/Streamlit_OCR_comparator/app_pages/home.py deleted file mode 100644 index ba025f10ec1c89895d4be5bf99704fedcde53182..0000000000000000000000000000000000000000 --- a/spaces/Loren/Streamlit_OCR_comparator/app_pages/home.py +++ /dev/null @@ -1,19 +0,0 @@ -import streamlit as st - -def app(): - st.image('ocr.png') - - st.write("") - - st.markdown('''#### OCR, or Optical Character Recognition, is a computer vision task, \ - which includes the detection of text areas, and the recognition of characters.''') - st.write("") - st.write("") - - st.markdown("##### This app allows you to compare, from a given image, the results of different solutions:") - st.markdown("##### *EasyOcr, PaddleOCR, MMOCR, Tesseract*") - st.write("") - st.write("") - st.markdown("👈 Select the **About** page from the sidebar for information on how the app works") - - st.markdown("👈 or directly select the **App** page") \ No newline at end of file diff --git a/spaces/ML701G7/taim-gan/src/models/modules/upsample.py b/spaces/ML701G7/taim-gan/src/models/modules/upsample.py deleted file mode 100644 index 6f38b7418c031b513b9396d98e8c1fb023efdd6e..0000000000000000000000000000000000000000 --- a/spaces/ML701G7/taim-gan/src/models/modules/upsample.py +++ /dev/null @@ -1,30 +0,0 @@ -"""UpSample module.""" - -from torch import nn - - -def up_sample(in_planes: int, out_planes: int) -> nn.Module: - """UpSample module.""" - return nn.Sequential( - nn.Upsample(scale_factor=2, mode="nearest"), - nn.Conv2d( - in_planes, out_planes * 2, kernel_size=3, stride=1, padding=1, bias=False - ), - nn.InstanceNorm2d(out_planes * 2), - 
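# GLU along dim=1 halves the channel count, so the conv/norm above emit out_planes * 2
# channels and exactly out_planes remain after this gate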
nn.GLU(dim=1), - ) - - -def img_up_block(in_planes: int, out_planes: int) -> nn.Module: - """ - Image upsample block. - Mainly used to conver the 17 x 17 local feature map from Inception to 32 x 32 size. - """ - return nn.Sequential( - nn.Upsample(scale_factor=1.9, mode="nearest"), - nn.Conv2d( - in_planes, out_planes * 2, kernel_size=3, stride=1, padding=1, bias=False - ), - nn.InstanceNorm2d(out_planes * 2), - nn.GLU(dim=1), - ) diff --git a/spaces/Malifex/flax-anything-v3.0/README.md b/spaces/Malifex/flax-anything-v3.0/README.md deleted file mode 100644 index 5c63a531a19ff477729572e744d7251df2516413..0000000000000000000000000000000000000000 --- a/spaces/Malifex/flax-anything-v3.0/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Flax Anything V3.0 -emoji: 😻 -colorFrom: blue -colorTo: purple -sdk: gradio -sdk_version: 3.12.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/MashiroSA/sovits-emu-voice-transform/vdecoder/nsf_hifigan/env.py b/spaces/MashiroSA/sovits-emu-voice-transform/vdecoder/nsf_hifigan/env.py deleted file mode 100644 index 2bdbc95d4f7a8bad8fd4f5eef657e2b51d946056..0000000000000000000000000000000000000000 --- a/spaces/MashiroSA/sovits-emu-voice-transform/vdecoder/nsf_hifigan/env.py +++ /dev/null @@ -1,15 +0,0 @@ -import os -import shutil - - -class AttrDict(dict): - def __init__(self, *args, **kwargs): - super(AttrDict, self).__init__(*args, **kwargs) - self.__dict__ = self - - -def build_env(config, config_name, path): - t_path = os.path.join(path, config_name) - if config != t_path: - os.makedirs(path, exist_ok=True) - shutil.copyfile(config, os.path.join(path, config_name)) diff --git a/spaces/MateusA/StoryGenerator/app.py b/spaces/MateusA/StoryGenerator/app.py deleted file mode 100644 index 802d78aff8e7fa6fc5ed4494c961c6cf4b75cebb..0000000000000000000000000000000000000000 --- a/spaces/MateusA/StoryGenerator/app.py +++ /dev/null @@ -1,174 +0,0 @@ -import gradio as gr -from transformers import pipeline -import io, base64 -from PIL import Image -import numpy as np -import tensorflow as tf -import mediapy -import os -import sys -from huggingface_hub import snapshot_download - -import streamlit as st -import firebase_admin -from firebase_admin import credentials -from firebase_admin import firestore -import datetime -import tempfile -from typing import Optional -import numpy as np -from TTS.utils.manage import ModelManager -from TTS.utils.synthesizer import Synthesizer - - -# firestore singleton is a cached multiuser instance to persist shared crowdsource memory -@st.experimental_singleton -def get_db_firestore(): - cred = credentials.Certificate('test.json') - firebase_admin.initialize_app(cred, {'projectId': u'clinical-nlp-b9117',}) - db = firestore.client() - return db - -#start firestore singleton -db = get_db_firestore() - -# create ASR ML pipeline -asr = pipeline("automatic-speech-recognition", "facebook/wav2vec2-base-960h") - -# create Text Classification pipeline -classifier = pipeline("text-classification") - -# create text generator pipeline -story_gen = pipeline("text-generation", "pranavpsv/gpt2-genre-story-generator") - -# transcribe function -def transcribe(audio): - text = asr(audio)["text"] - return text - -def speech_to_text(speech): - text = asr(speech)["text"] - return text - -def text_to_sentiment(text): - sentiment = classifier(text)[0]["label"] - return sentiment - -def upsert(text): - date_time =str(datetime.datetime.today()) - doc_ref = 
db.collection('Text2SpeechSentimentSave').document(date_time) - doc_ref.set({u'firefield': 'Recognize Speech', u'first': 'https://huggingface.co/spaces/awacke1/Text2SpeechSentimentSave', u'last': text, u'born': date_time,}) - saved = select('Text2SpeechSentimentSave', date_time) - # check it here: https://console.firebase.google.com/u/0/project/clinical-nlp-b9117/firestore/data/~2FStreamlitSpaces - return saved - -def select(collection, document): - doc_ref = db.collection(collection).document(document) - doc = doc_ref.get() - docid = ("The id is: ", doc.id) - contents = ("The contents are: ", doc.to_dict()) - return contents - -def selectall(text): - docs = db.collection('Text2SpeechSentimentSave').stream() - doclist='' - for doc in docs: - r=(f'{doc.id} => {doc.to_dict()}') - doclist += r - return doclist - -# story gen -def generate_story(choice, input_text): - query = " <{0}> {1}".format(choice, input_text) - generated_text = story_gen(query) - generated_text = generated_text[0]['generated_text'] - generated_text = generated_text.split('> ')[2] - return generated_text - -# images gen -def generate_images(text): - steps=50 - width=256 - height=256 - num_images=4 - diversity=6 - image_bytes = image_gen(text, steps, width, height, num_images, diversity) - generated_images = [] - for image in image_bytes[1]: - image_str = image[0] - image_str = image_str.replace("data:image/png;base64,","") - decoded_bytes = base64.decodebytes(bytes(image_str, "utf-8")) - img = Image.open(io.BytesIO(decoded_bytes)) - generated_images.append(img) - return generated_images - -# reductionism - interpolate 4 images - todo - unhardcode the pattern -def generate_interpolation(gallery): - times_to_interpolate = 4 - generated_images = [] - for image_str in gallery: - image_str = image_str.replace("data:image/png;base64,","") - decoded_bytes = base64.decodebytes(bytes(image_str, "utf-8")) - img = Image.open(io.BytesIO(decoded_bytes)) - generated_images.append(img) - generated_images[0].save('frame_0.png') - generated_images[1].save('frame_1.png') - generated_images[2].save('frame_2.png') - generated_images[3].save('frame_3.png') - input_frames = ["frame_0.png", "frame_1.png", "frame_2.png", "frame_3.png"] - frames = list(util.interpolate_recursively_from_files(input_frames, times_to_interpolate, interpolator)) - mediapy.write_video("out.mp4", frames, fps=15) - return "out.mp4" - -# image generator -image_gen = gr.Interface.load("spaces/multimodalart/latentdiffusion") - -# video generator -os.system("git clone https://github.com/google-research/frame-interpolation") -sys.path.append("frame-interpolation") -from eval import interpolator, util - -ffmpeg_path = util.get_ffmpeg_path() -mediapy.set_ffmpeg(ffmpeg_path) -model = snapshot_download(repo_id="akhaliq/frame-interpolation-film-style") -interpolator = interpolator.Interpolator(model, None) - -demo = gr.Blocks() -with demo: - - audio_file = gr.inputs.Audio(source="microphone", type="filepath") - text = gr.Textbox() - label = gr.Label() - saved = gr.Textbox() - savedAll = gr.Textbox() - audio = gr.Audio(label="Output", interactive=False) - - b1 = gr.Button("Recognize Speech") - b2 = gr.Button("Classify Sentiment") - b3 = gr.Button("Save Speech to Text") - b4 = gr.Button("Retrieve All") - - input_story_type = gr.Radio(choices=['superhero', 'action', 'drama', 'horror', 'thriller', 'sci_fi'], value='sci_fi', label="Genre") - input_start_text = gr.Textbox(placeholder='A teddy bear outer space', label="Starting Text") - - gr.Markdown("1. 
Select a type of story, then write some starting text! Then hit the 'Generate Story' button to generate a story! Feel free to edit the generated story afterwards!") - button_gen_story = gr.Button("Generate Story") - gr.Markdown("2. After generating a story, hit the 'Generate Images' button to create some visuals for your story! (Can re-run multiple times!)") - button_gen_images = gr.Button("Generate Images") - gr.Markdown("3. After generating some images, hit the 'Generate Video' button to create a short video by interpolating the previously generated visuals!") - button_gen_video = gr.Button("Generate Video") - output_generated_story = gr.Textbox(label="Generated Story") - output_gallery = gr.Gallery(label="Generated Story Images") - output_interpolation = gr.Video(label="Generated Video") - - # Bind functions to buttons - button_gen_story.click(fn=generate_story, inputs=[input_story_type , input_start_text], outputs=output_generated_story) - button_gen_images.click(fn=generate_images, inputs=output_generated_story, outputs=output_gallery) - button_gen_video.click(fn=generate_interpolation, inputs=output_gallery, outputs=output_interpolation) - - b1.click(speech_to_text, inputs=audio_file, outputs=input_start_text ) - b2.click(text_to_sentiment, inputs=text, outputs=label) - b3.click(upsert, inputs=text, outputs=saved) - b4.click(selectall, inputs=text, outputs=savedAll) - -demo.launch(debug=True, enable_queue=True) \ No newline at end of file diff --git a/spaces/Mecca/whisper-webui/src/utils.py b/spaces/Mecca/whisper-webui/src/utils.py deleted file mode 100644 index 7f4ef3d71260034f655d6362f92e866b8777d16d..0000000000000000000000000000000000000000 --- a/spaces/Mecca/whisper-webui/src/utils.py +++ /dev/null @@ -1,135 +0,0 @@ -import textwrap -import unicodedata -import re - -import zlib -from typing import Iterator, TextIO -import tqdm - -import urllib3 - - -def exact_div(x, y): - assert x % y == 0 - return x // y - - -def str2bool(string): - str2val = {"True": True, "False": False} - if string in str2val: - return str2val[string] - else: - raise ValueError(f"Expected one of {set(str2val.keys())}, got {string}") - - -def optional_int(string): - return None if string == "None" else int(string) - - -def optional_float(string): - return None if string == "None" else float(string) - - -def compression_ratio(text) -> float: - return len(text) / len(zlib.compress(text.encode("utf-8"))) - - -def format_timestamp(seconds: float, always_include_hours: bool = False, fractionalSeperator: str = '.'): - assert seconds >= 0, "non-negative timestamp expected" - milliseconds = round(seconds * 1000.0) - - hours = milliseconds // 3_600_000 - milliseconds -= hours * 3_600_000 - - minutes = milliseconds // 60_000 - milliseconds -= minutes * 60_000 - - seconds = milliseconds // 1_000 - milliseconds -= seconds * 1_000 - - hours_marker = f"{hours:02d}:" if always_include_hours or hours > 0 else "" - return f"{hours_marker}{minutes:02d}:{seconds:02d}{fractionalSeperator}{milliseconds:03d}" - - -def write_txt(transcript: Iterator[dict], file: TextIO): - for segment in transcript: - print(segment['text'].strip(), file=file, flush=True) - - -def write_vtt(transcript: Iterator[dict], file: TextIO, maxLineWidth=None): - print("WEBVTT\n", file=file) - for segment in transcript: - text = process_text(segment['text'], maxLineWidth).replace('-->', '->') - - print( - f"{format_timestamp(segment['start'])} --> {format_timestamp(segment['end'])}\n" - f"{text}\n", - file=file, - flush=True, - ) - - -def write_srt(transcript: 
Iterator[dict], file: TextIO, maxLineWidth=None): - """ - Write a transcript to a file in SRT format. - Example usage: - from pathlib import Path - from whisper.utils import write_srt - result = transcribe(model, audio_path, temperature=temperature, **args) - # save SRT - audio_basename = Path(audio_path).stem - with open(Path(output_dir) / (audio_basename + ".srt"), "w", encoding="utf-8") as srt: - write_srt(result["segments"], file=srt) - """ - for i, segment in enumerate(transcript, start=1): - text = process_text(segment['text'].strip(), maxLineWidth).replace('-->', '->') - - # write srt lines - print( - f"{i}\n" - f"{format_timestamp(segment['start'], always_include_hours=True, fractionalSeperator=',')} --> " - f"{format_timestamp(segment['end'], always_include_hours=True, fractionalSeperator=',')}\n" - f"{text}\n", - file=file, - flush=True, - ) - -def process_text(text: str, maxLineWidth=None): - if (maxLineWidth is None or maxLineWidth < 0): - return text - - lines = textwrap.wrap(text, width=maxLineWidth, tabsize=4) - return '\n'.join(lines) - -def slugify(value, allow_unicode=False): - """ - Taken from https://github.com/django/django/blob/master/django/utils/text.py - Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated - dashes to single dashes. Remove characters that aren't alphanumerics, - underscores, or hyphens. Convert to lowercase. Also strip leading and - trailing whitespace, dashes, and underscores. - """ - value = str(value) - if allow_unicode: - value = unicodedata.normalize('NFKC', value) - else: - value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii') - value = re.sub(r'[^\w\s-]', '', value.lower()) - return re.sub(r'[-\s]+', '-', value).strip('-_') - -def download_file(url: str, destination: str): - with urllib3.request.urlopen(url) as source, open(destination, "wb") as output: - with tqdm( - total=int(source.info().get("Content-Length")), - ncols=80, - unit="iB", - unit_scale=True, - unit_divisor=1024, - ) as loop: - while True: - buffer = source.read(8192) - if not buffer: - break - - output.write(buffer) - loop.update(len(buffer)) \ No newline at end of file diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/midas/api.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/midas/api.py deleted file mode 100644 index 1ab9f15bf96bbaffcee0e3e29fc9d3979d6c32e8..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/midas/api.py +++ /dev/null @@ -1,169 +0,0 @@ -# based on https://github.com/isl-org/MiDaS - -import cv2 -import os -import torch -import torch.nn as nn -from torchvision.transforms import Compose - -from .midas.dpt_depth import DPTDepthModel -from .midas.midas_net import MidasNet -from .midas.midas_net_custom import MidasNet_small -from .midas.transforms import Resize, NormalizeImage, PrepareForNet -from annotator.util import annotator_ckpts_path - - -ISL_PATHS = { - "dpt_large": os.path.join(annotator_ckpts_path, "dpt_large-midas-2f21e586.pt"), - "dpt_hybrid": os.path.join(annotator_ckpts_path, "dpt_hybrid-midas-501f0c75.pt"), - "midas_v21": "", - "midas_v21_small": "", -} - -remote_model_path = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/dpt_hybrid-midas-501f0c75.pt" - - -def disabled_train(self, mode=True): - """Overwrite model.train with this function to make sure train/eval mode - does not change anymore.""" - return self - - -def load_midas_transform(model_type): - # https://github.com/isl-org/MiDaS/blob/master/run.py - # load transform 
only - if model_type == "dpt_large": # DPT-Large - net_w, net_h = 384, 384 - resize_mode = "minimal" - normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) - - elif model_type == "dpt_hybrid": # DPT-Hybrid - net_w, net_h = 384, 384 - resize_mode = "minimal" - normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) - - elif model_type == "midas_v21": - net_w, net_h = 384, 384 - resize_mode = "upper_bound" - normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) - - elif model_type == "midas_v21_small": - net_w, net_h = 256, 256 - resize_mode = "upper_bound" - normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) - - else: - assert False, f"model_type '{model_type}' not implemented, use: --model_type large" - - transform = Compose( - [ - Resize( - net_w, - net_h, - resize_target=None, - keep_aspect_ratio=True, - ensure_multiple_of=32, - resize_method=resize_mode, - image_interpolation_method=cv2.INTER_CUBIC, - ), - normalization, - PrepareForNet(), - ] - ) - - return transform - - -def load_model(model_type): - # https://github.com/isl-org/MiDaS/blob/master/run.py - # load network - model_path = ISL_PATHS[model_type] - if model_type == "dpt_large": # DPT-Large - model = DPTDepthModel( - path=model_path, - backbone="vitl16_384", - non_negative=True, - ) - net_w, net_h = 384, 384 - resize_mode = "minimal" - normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) - - elif model_type == "dpt_hybrid": # DPT-Hybrid - if not os.path.exists(model_path): - from basicsr.utils.download_util import load_file_from_url - load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path) - - model = DPTDepthModel( - path=model_path, - backbone="vitb_rn50_384", - non_negative=True, - ) - net_w, net_h = 384, 384 - resize_mode = "minimal" - normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) - - elif model_type == "midas_v21": - model = MidasNet(model_path, non_negative=True) - net_w, net_h = 384, 384 - resize_mode = "upper_bound" - normalization = NormalizeImage( - mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] - ) - - elif model_type == "midas_v21_small": - model = MidasNet_small(model_path, features=64, backbone="efficientnet_lite3", exportable=True, - non_negative=True, blocks={'expand': True}) - net_w, net_h = 256, 256 - resize_mode = "upper_bound" - normalization = NormalizeImage( - mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] - ) - - else: - print(f"model_type '{model_type}' not implemented, use: --model_type large") - assert False - - transform = Compose( - [ - Resize( - net_w, - net_h, - resize_target=None, - keep_aspect_ratio=True, - ensure_multiple_of=32, - resize_method=resize_mode, - image_interpolation_method=cv2.INTER_CUBIC, - ), - normalization, - PrepareForNet(), - ] - ) - - return model.eval(), transform - - -class MiDaSInference(nn.Module): - MODEL_TYPES_TORCH_HUB = [ - "DPT_Large", - "DPT_Hybrid", - "MiDaS_small" - ] - MODEL_TYPES_ISL = [ - "dpt_large", - "dpt_hybrid", - "midas_v21", - "midas_v21_small", - ] - - def __init__(self, model_type): - super().__init__() - assert (model_type in self.MODEL_TYPES_ISL) - model, _ = load_model(model_type) - self.model = model - self.model.train = disabled_train - - def forward(self, x): - with torch.no_grad(): - prediction = self.model(x) - return prediction - diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/core/seg/__init__.py 
b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/core/seg/__init__.py deleted file mode 100644 index 93bc129b685e4a3efca2cc891729981b2865900d..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/core/seg/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .builder import build_pixel_sampler -from .sampler import BasePixelSampler, OHEMPixelSampler - -__all__ = ['build_pixel_sampler', 'BasePixelSampler', 'OHEMPixelSampler'] diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/models/decode_heads/fpn_head.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/models/decode_heads/fpn_head.py deleted file mode 100644 index 1241c55b0813d1ecdddf1e66e7c5031fbf78ed50..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/models/decode_heads/fpn_head.py +++ /dev/null @@ -1,68 +0,0 @@ -import numpy as np -import torch.nn as nn -from annotator.uniformer.mmcv.cnn import ConvModule - -from annotator.uniformer.mmseg.ops import resize -from ..builder import HEADS -from .decode_head import BaseDecodeHead - - -@HEADS.register_module() -class FPNHead(BaseDecodeHead): - """Panoptic Feature Pyramid Networks. - - This head is the implementation of `Semantic FPN - `_. - - Args: - feature_strides (tuple[int]): The strides for input feature maps. - stack_lateral. All strides suppose to be power of 2. The first - one is of largest resolution. - """ - - def __init__(self, feature_strides, **kwargs): - super(FPNHead, self).__init__( - input_transform='multiple_select', **kwargs) - assert len(feature_strides) == len(self.in_channels) - assert min(feature_strides) == feature_strides[0] - self.feature_strides = feature_strides - - self.scale_heads = nn.ModuleList() - for i in range(len(feature_strides)): - head_length = max( - 1, - int(np.log2(feature_strides[i]) - np.log2(feature_strides[0]))) - scale_head = [] - for k in range(head_length): - scale_head.append( - ConvModule( - self.in_channels[i] if k == 0 else self.channels, - self.channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg)) - if feature_strides[i] != feature_strides[0]: - scale_head.append( - nn.Upsample( - scale_factor=2, - mode='bilinear', - align_corners=self.align_corners)) - self.scale_heads.append(nn.Sequential(*scale_head)) - - def forward(self, inputs): - - x = self._transform_inputs(inputs) - - output = self.scale_heads[0](x[0]) - for i in range(1, len(self.feature_strides)): - # non inplace - output = output + resize( - self.scale_heads[i](x[i]), - size=output.shape[2:], - mode='bilinear', - align_corners=self.align_corners) - - output = self.cls_seg(output) - return output diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/ldm/models/diffusion/sampling_util.py b/spaces/Mellow-ai/PhotoAI_Mellow/ldm/models/diffusion/sampling_util.py deleted file mode 100644 index 7eff02be6d7c54d43ee6680636ac0698dd3b3f33..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/ldm/models/diffusion/sampling_util.py +++ /dev/null @@ -1,22 +0,0 @@ -import torch -import numpy as np - - -def append_dims(x, target_dims): - """Appends dimensions to the end of a tensor until it has target_dims dimensions. 
- From https://github.com/crowsonkb/k-diffusion/blob/master/k_diffusion/utils.py""" - dims_to_append = target_dims - x.ndim - if dims_to_append < 0: - raise ValueError(f'input has {x.ndim} dims but target_dims is {target_dims}, which is less') - return x[(...,) + (None,) * dims_to_append] - - -def norm_thresholding(x0, value): - s = append_dims(x0.pow(2).flatten(1).mean(1).sqrt().clamp(min=value), x0.ndim) - return x0 * (value / s) - - -def spatial_norm_thresholding(x0, value): - # b c h w - s = x0.pow(2).mean(1, keepdim=True).sqrt().clamp(min=value) - return x0 * (value / s) \ No newline at end of file diff --git a/spaces/MetaWabbit/Auto-GPT/autogpt/permanent_memory/__init__.py b/spaces/MetaWabbit/Auto-GPT/autogpt/permanent_memory/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/MikailDuzenli/vilt_demo/app.py b/spaces/MikailDuzenli/vilt_demo/app.py deleted file mode 100644 index 614547d7e780ef3077e703e10320590331e5055b..0000000000000000000000000000000000000000 --- a/spaces/MikailDuzenli/vilt_demo/app.py +++ /dev/null @@ -1,326 +0,0 @@ -import gradio as gr -import torch -import torch.nn.functional as F -import requests -import numpy as np -import re -import io -import matplotlib.pyplot as plt - -from PIL import Image -from transformers import ViltProcessor, ViltForMaskedLM -from torchvision import transforms - -processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-mlm") -model = ViltForMaskedLM.from_pretrained("dandelin/vilt-b32-mlm") - -device = "cuda:0" if torch.cuda.is_available() else "cpu" -model.to(device) - - -class MinMaxResize: - def __init__(self, shorter=800, longer=1333): - self.min = shorter - self.max = longer - - def __call__(self, x): - w, h = x.size - scale = self.min / min(w, h) - if h < w: - newh, neww = self.min, scale * w - else: - newh, neww = scale * h, self.min - - if max(newh, neww) > self.max: - scale = self.max / max(newh, neww) - newh = newh * scale - neww = neww * scale - - newh, neww = int(newh + 0.5), int(neww + 0.5) - newh, neww = newh // 32 * 32, neww // 32 * 32 - - return x.resize((neww, newh), resample=Image.Resampling.BICUBIC) - - -def pixelbert_transform(size=800): - longer = int((1333 / 800) * size) - return transforms.Compose( - [ - MinMaxResize(shorter=size, longer=longer), - transforms.ToTensor(), - transforms.Compose([transforms.Normalize( - mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]), - ] - ) - - -def cost_matrix_cosine(x, y, eps=1e-5): - """Compute cosine distnace across every pairs of x, y (batched) - [B, L_x, D] [B, L_y, D] -> [B, Lx, Ly]""" - assert x.dim() == y.dim() - assert x.size(0) == y.size(0) - assert x.size(2) == y.size(2) - x_norm = F.normalize(x, p=2, dim=-1, eps=eps) - y_norm = F.normalize(y, p=2, dim=-1, eps=eps) - cosine_sim = x_norm.matmul(y_norm.transpose(1, 2)) - cosine_dist = 1 - cosine_sim - return cosine_dist - - -@torch.no_grad() -def ipot(C, x_len, x_pad, y_len, y_pad, joint_pad, beta, iteration, k): - """ [B, M, N], [B], [B, M], [B], [B, N], [B, M, N]""" - b, m, n = C.size() - sigma = torch.ones(b, m, dtype=C.dtype, - device=C.device) / x_len.unsqueeze(1) - T = torch.ones(b, n, m, dtype=C.dtype, device=C.device) - A = torch.exp(-C.transpose(1, 2) / beta) - - # mask padded positions - sigma.masked_fill_(x_pad, 0) - joint_pad = joint_pad.transpose(1, 2) - T.masked_fill_(joint_pad, 0) - A.masked_fill_(joint_pad, 0) - - # broadcastable lengths - x_len = x_len.unsqueeze(1).unsqueeze(2) - y_len = 
y_len.unsqueeze(1).unsqueeze(2) - - # mask to zero out padding in delta and sigma - x_mask = (x_pad.to(C.dtype) * 1e4).unsqueeze(1) - y_mask = (y_pad.to(C.dtype) * 1e4).unsqueeze(1) - - for _ in range(iteration): - Q = A * T # bs * n * m - sigma = sigma.view(b, m, 1) - for _ in range(k): - delta = 1 / (y_len * Q.matmul(sigma).view(b, 1, n) + y_mask) - sigma = 1 / (x_len * delta.matmul(Q) + x_mask) - T = delta.view(b, n, 1) * Q * sigma - T.masked_fill_(joint_pad, 0) - return T - - -def get_model_embedding_and_mask(model, input_ids, pixel_values): - - input_shape = input_ids.size() - - text_batch_size, seq_length = input_shape - device = input_ids.device - attention_mask = torch.ones(((text_batch_size, seq_length)), device=device) - image_batch_size = pixel_values.shape[0] - image_token_type_idx = 1 - - if image_batch_size != text_batch_size: - raise ValueError( - "The text inputs and image inputs need to have the same batch size") - - pixel_mask = torch.ones((image_batch_size, model.vilt.config.image_size, - model.vilt.config.image_size), device=device) - - text_embeds = model.vilt.embeddings.text_embeddings( - input_ids=input_ids, token_type_ids=None, inputs_embeds=None) - - image_embeds, image_masks, patch_index = model.vilt.embeddings.visual_embed( - pixel_values=pixel_values, pixel_mask=pixel_mask, max_image_length=model.vilt.config.max_image_length - ) - text_embeds = text_embeds + model.vilt.embeddings.token_type_embeddings( - torch.zeros_like(attention_mask, dtype=torch.long, - device=text_embeds.device) - ) - image_embeds = image_embeds + model.vilt.embeddings.token_type_embeddings( - torch.full_like(image_masks, image_token_type_idx, - dtype=torch.long, device=text_embeds.device) - ) - - return text_embeds, image_embeds, attention_mask, image_masks, patch_index - - -def infer(url, mp_text, hidx): - try: - res = requests.get(url) - image = Image.open(io.BytesIO(res.content)).convert("RGB") - img = pixelbert_transform(size=500)(image) - img = img.unsqueeze(0).to(device) - except: - return False - - tl = len(re.findall("\[MASK\]", mp_text)) - inferred_token = [mp_text] - encoding = processor(image, mp_text, return_tensors="pt") - - with torch.no_grad(): - for i in range(tl): - encoded = processor.tokenizer(inferred_token) - input_ids = torch.tensor(encoded.input_ids) - encoded = encoded["input_ids"][0][1:-1] - outputs = model(input_ids=input_ids, - pixel_values=encoding.pixel_values) - mlm_logits = outputs.logits[0] # shape (seq_len, vocab_size) - - # only take into account text features (minus CLS and SEP token) - mlm_logits = mlm_logits[1: input_ids.shape[1] - 1, :] - mlm_values, mlm_ids = mlm_logits.softmax(dim=-1).max(dim=-1) - - # only take into account text - mlm_values[torch.tensor(encoded) != 103] = 0 - select = mlm_values.argmax().item() - encoded[select] = mlm_ids[select].item() - inferred_token = [processor.decode(encoded)] - - encoded = processor.tokenizer(inferred_token) - output = processor.decode(encoded.input_ids[0], skip_special_tokens=True) - selected_token = '' - result = Image.open('no_heatmap.jpg') - - if hidx > 0 and hidx < len(encoded["input_ids"][0][:-1]): - input_ids = torch.tensor(encoded.input_ids) - outputs = model( - input_ids=input_ids, pixel_values=encoding.pixel_values, output_hidden_states=True) - - txt_emb, img_emb, text_masks, image_masks, patch_index = get_model_embedding_and_mask( - model, input_ids=input_ids, pixel_values=encoding.pixel_values) - - embedding_output = torch.cat([txt_emb, img_emb], dim=1) - attention_mask = torch.cat([text_masks, 
image_masks], dim=1) - - extended_attention_mask = model.vilt.get_extended_attention_mask( - attention_mask, input_ids.size(), device=device) - - encoder_outputs = model.vilt.encoder( - embedding_output, - attention_mask=extended_attention_mask, - head_mask=None, - output_attentions=False, - output_hidden_states=True, - return_dict=True, - ) - - x = encoder_outputs.hidden_states[-1] - x = model.vilt.layernorm(x) - - txt_emb, img_emb = ( - x[:, :txt_emb.shape[1]], - x[:, txt_emb.shape[1]:], - ) - - txt_mask, img_mask = ( - text_masks.bool(), - image_masks.bool(), - ) - - for i, _len in enumerate(txt_mask.sum(dim=1)): - txt_mask[i, _len - 1] = False - txt_mask[:, 0] = False - img_mask[:, 0] = False - txt_pad, img_pad = ~txt_mask, ~img_mask - cost = cost_matrix_cosine(txt_emb.float(), img_emb.float()) - joint_pad = txt_pad.unsqueeze(-1) | img_pad.unsqueeze(-2) - cost.masked_fill_(joint_pad, 0) - - txt_len = (txt_pad.size(1) - txt_pad.sum(dim=1, - keepdim=False)).to(dtype=cost.dtype) - img_len = (img_pad.size(1) - img_pad.sum(dim=1, - keepdim=False)).to(dtype=cost.dtype) - T = ipot(cost.detach(), - txt_len, - txt_pad, - img_len, - img_pad, - joint_pad, - 0.1, - 1000, - 1, - ) - plan = T[0] - plan_single = plan * len(txt_emb) - cost_ = plan_single.t() - - cost_ = cost_[hidx][1:].cpu() - - patch_index, (H, W) = patch_index - heatmap = torch.zeros(H, W) - for i, pidx in enumerate(patch_index[0]): - h, w = pidx[0].item(), pidx[1].item() - heatmap[h, w] = cost_[i] - - heatmap = (heatmap - heatmap.mean()) / heatmap.std() - heatmap = np.clip(heatmap, 1.0, 3.0) - heatmap = (heatmap - heatmap.min()) / (heatmap.max() - heatmap.min()) - - _w, _h = image.size - overlay = Image.fromarray(np.uint8(heatmap * 255), "L").resize( - (_w, _h), resample=Image.Resampling.NEAREST - ) - image_rgba = image.copy() - image_rgba.putalpha(overlay) - result = image_rgba - - selected_token = processor.tokenizer.convert_ids_to_tokens( - encoded["input_ids"][0][hidx] - ) - - return [np.array(image), output, selected_token, result] - - -title = "What's in the picture ?" - -description = """ -Can't find your words to describe an image ? The pre-trained -ViLT model will help you. Give the url of an image and a caption with [MASK] tokens to be filled or play with the given examples ! -You can even see where the model focused its attention for a given word : just choose the index of the selected word with the slider. 
-""" - - -inputs_interface = [ - gr.inputs.Textbox( - label="Url of an image.", - lines=5, - ), - gr.inputs.Textbox( - label="Caption with [MASK] tokens to be filled.", lines=5), - gr.inputs.Slider( - minimum=0, - maximum=38, - step=1, - label="Index of token for heatmap visualization (ignored if zero)", - ), -] -outputs_interface = [ - gr.outputs.Image(label="Image"), - gr.outputs.Textbox(label="description"), - gr.outputs.Textbox(label="selected token"), - gr.outputs.Image(label="Heatmap") -] - -interface = gr.Interface( - fn=infer, - inputs=inputs_interface, - outputs=outputs_interface, - title=title, - description=description, - server_name="0.0.0.0", - server_port=8888, - examples=[ - [ - "https://s3.geograph.org.uk/geophotos/06/21/24/6212487_1cca7f3f_1024x1024.jpg", - "a display of flowers growing out and over the [MASK] [MASK] in front of [MASK] on a [MASK] [MASK].", - 0, - ], - - [ - "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcT5W71UTcSBm3r5l9NzBemglq983bYvKOHRkw&usqp=CAU", - "An [MASK] with the [MASK] in the [MASK].", - 5, - ], - - [ - "https://www.referenseo.com/wp-content/uploads/2019/03/image-attractive-960x540.jpg", - "An [MASK] is flying with a [MASK] over a [MASK].", - 2, - ], - ], -) - - -interface.launch() diff --git a/spaces/Mountchicken/MAERec-Gradio/configs/kie/sdmgr/sdmgr_unet16_60e_wildreceipt.py b/spaces/Mountchicken/MAERec-Gradio/configs/kie/sdmgr/sdmgr_unet16_60e_wildreceipt.py deleted file mode 100644 index d49cbbc33798e815a24cb29cf3bc008460948c88..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/configs/kie/sdmgr/sdmgr_unet16_60e_wildreceipt.py +++ /dev/null @@ -1,29 +0,0 @@ -_base_ = [ - '../_base_/default_runtime.py', - '../_base_/datasets/wildreceipt.py', - '../_base_/schedules/schedule_adam_60e.py', - '_base_sdmgr_unet16.py', -] - -wildreceipt_train = _base_.wildreceipt_train -wildreceipt_train.pipeline = _base_.train_pipeline -wildreceipt_test = _base_.wildreceipt_test -wildreceipt_test.pipeline = _base_.test_pipeline - -train_dataloader = dict( - batch_size=4, - num_workers=4, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=wildreceipt_train) - -val_dataloader = dict( - batch_size=1, - num_workers=1, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=False), - dataset=wildreceipt_test) - -test_dataloader = val_dataloader - -auto_scale_lr = dict(base_batch_size=4) diff --git a/spaces/Mountchicken/MAERec-Gradio/configs/textdet/fcenet/fcenet_resnet50_fpn_1500e_icdar2015.py b/spaces/Mountchicken/MAERec-Gradio/configs/textdet/fcenet/fcenet_resnet50_fpn_1500e_icdar2015.py deleted file mode 100644 index 8257a046314dc7d671eb28714e42fb6d70f2b8e0..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/configs/textdet/fcenet/fcenet_resnet50_fpn_1500e_icdar2015.py +++ /dev/null @@ -1,37 +0,0 @@ -_base_ = [ - '_base_fcenet_resnet50_fpn.py', - '../_base_/datasets/icdar2015.py', - '../_base_/default_runtime.py', - '../_base_/schedules/schedule_sgd_base.py', -] - -optim_wrapper = dict(optimizer=dict(lr=1e-3, weight_decay=5e-4)) -train_cfg = dict(max_epochs=1500) -# learning policy -param_scheduler = [ - dict(type='PolyLR', power=0.9, eta_min=1e-7, end=1500), -] - -# dataset settings -icdar2015_textdet_train = _base_.icdar2015_textdet_train -icdar2015_textdet_test = _base_.icdar2015_textdet_test -icdar2015_textdet_train.pipeline = _base_.train_pipeline -icdar2015_textdet_test.pipeline = _base_.test_pipeline - -train_dataloader = dict( - 
batch_size=8, - num_workers=4, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=icdar2015_textdet_train) - -val_dataloader = dict( - batch_size=1, - num_workers=1, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=False), - dataset=icdar2015_textdet_test) - -test_dataloader = val_dataloader - -auto_scale_lr = dict(base_batch_size=8) diff --git a/spaces/NCTCMumbai/NCTC/models/.github/ISSUE_TEMPLATE/10-official-documentation-issue.md b/spaces/NCTCMumbai/NCTC/models/.github/ISSUE_TEMPLATE/10-official-documentation-issue.md deleted file mode 100644 index 00d79a16916c327d2d8a729791db7d7d3d96b735..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/.github/ISSUE_TEMPLATE/10-official-documentation-issue.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -name: "[Official Model] Documentation Issue" -about: Use this template for reporting a documentation issue for the “official” directory -labels: type:docs,models:official - ---- - -# Prerequisites - -Please answer the following question for yourself before submitting an issue. - -- [ ] I checked to make sure that this issue has not been filed already. - -## 1. The entire URL of the documentation with the issue - -https://github.com/tensorflow/models/tree/master/official/... - -## 2. Describe the issue - -A clear and concise description of what needs to be changed. diff --git a/spaces/NCTCMumbai/NCTC/models/official/nlp/modeling/layers/masked_softmax_test.py b/spaces/NCTCMumbai/NCTC/models/official/nlp/modeling/layers/masked_softmax_test.py deleted file mode 100644 index befe0f786a7b4d84a5dc975d1780acdd2c964a2c..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/official/nlp/modeling/layers/masked_softmax_test.py +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright 2019 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tests for Keras-based masked softmax layer.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import tensorflow as tf - -from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import -from official.nlp.modeling.layers import masked_softmax - - -# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It -# guarantees forward compatibility of this code for the V2 switchover. 
-@keras_parameterized.run_all_keras_modes -class MaskedSoftmaxLayerTest(keras_parameterized.TestCase): - - def test_non_masked_softmax(self): - test_layer = masked_softmax.MaskedSoftmax() - input_tensor = tf.keras.Input(shape=(4, 8)) - output = test_layer(input_tensor) - model = tf.keras.Model(input_tensor, output) - - input_data = 10 * np.random.random_sample((3, 4, 8)) - output_data = model.predict(input_data) - expected_data = tf.nn.softmax(input_data) - self.assertAllClose(expected_data, output_data) - - def test_masked_softmax(self): - test_layer = masked_softmax.MaskedSoftmax() - input_tensor = tf.keras.Input(shape=(4, 8)) - mask_tensor = tf.keras.Input(shape=(4, 8)) - output = test_layer(input_tensor, mask_tensor) - model = tf.keras.Model([input_tensor, mask_tensor], output) - - input_data = 10 * np.random.random_sample((3, 4, 8)) - mask_data = np.random.randint(2, size=(3, 4, 8)) - - output_data = model.predict([input_data, mask_data]) - expected_zeros = np.greater(mask_data, 0) - is_zeros = np.greater(output_data, 0) - self.assertAllEqual(expected_zeros, is_zeros) - - def test_masked_softmax_with_none_mask(self): - test_layer = masked_softmax.MaskedSoftmax() - input_tensor = tf.keras.Input(shape=(4, 8)) - output = test_layer(input_tensor, None) - model = tf.keras.Model(input_tensor, output) - - input_data = 10 * np.random.random_sample((3, 4, 8)) - output_data = model.predict(input_data) - expected_data = tf.nn.softmax(input_data) - self.assertAllClose(expected_data, output_data) - - def test_softmax_with_axes_expansion(self): - test_layer = masked_softmax.MaskedSoftmax(mask_expansion_axes=[1]) - input_tensor = tf.keras.Input(shape=(4, 8)) - mask_tensor = tf.keras.Input(shape=(8)) - output = test_layer(input_tensor, mask_tensor) - model = tf.keras.Model([input_tensor, mask_tensor], output) - - input_data = 10 * np.random.random_sample((3, 4, 8)) - mask_data = np.random.randint(2, size=(3, 8)) - - output_data = model.predict([input_data, mask_data]) - expanded_mask = np.expand_dims(mask_data, axis=1) * np.ones_like(input_data) - expected_zeros = np.greater(expanded_mask, 0) - is_zeros = np.greater(output_data, 0) - self.assertAllEqual(expected_zeros, is_zeros) - - def test_masked_softmax_high_dims(self): - test_layer = masked_softmax.MaskedSoftmax( - mask_expansion_axes=[1], normalization_axes=[6, 7]) - input_shape = [2, 3, 4, 5, 6, 7, 8] - mask_shape = [5, 6, 7, 8] - input_tensor = tf.keras.Input(shape=input_shape) - mask_tensor = tf.keras.Input(shape=mask_shape) - output = test_layer(input_tensor, mask_tensor) - model = tf.keras.Model([input_tensor, mask_tensor], output) - - input_data = 10 * np.random.random_sample([3] + input_shape) - mask_data = np.random.randint(2, size=[3] + mask_shape) - - output_data = model.predict([input_data, mask_data]) - expanded_mask = np.expand_dims(mask_data, axis=1) - expanded_mask = np.expand_dims(expanded_mask, axis=1) - expanded_mask = np.expand_dims( - expanded_mask, axis=1) * np.ones_like(input_data) - expected_zeros = np.greater(expanded_mask, 0) - is_zeros = np.greater(output_data, 0) - self.assertAllEqual(expected_zeros, is_zeros) - - def test_serialize_deserialize(self): - test_layer = masked_softmax.MaskedSoftmax( - mask_expansion_axes=[1], normalization_axes=[6, 7]) - new_layer = masked_softmax.MaskedSoftmax.from_config( - test_layer.get_config()) - - # If the serialization was successful, the new config should match the old. 
- self.assertAllEqual(test_layer.get_config(), new_layer.get_config()) - - -if __name__ == '__main__': - tf.test.main() diff --git a/spaces/NSect/VALL-E-X/models/macros.py b/spaces/NSect/VALL-E-X/models/macros.py deleted file mode 100644 index cbc54966f43b2ef27d87c3b4bc69cb866d2b8fd0..0000000000000000000000000000000000000000 --- a/spaces/NSect/VALL-E-X/models/macros.py +++ /dev/null @@ -1,11 +0,0 @@ -# Text -NUM_TEXT_TOKENS = 2048 - -# Audio -NUM_AUDIO_TOKENS = 1024 # EnCodec RVQ bins -NUM_MEL_BINS = 100 # BigVGAN bigvgan_24khz_100band - - -# Speaker -NUM_SPEAKER_CLASSES = 4096 -SPEAKER_EMBEDDING_DIM = 64 diff --git a/spaces/OAOA/DifFace/basicsr/ops/upfirdn2d/upfirdn2d.py b/spaces/OAOA/DifFace/basicsr/ops/upfirdn2d/upfirdn2d.py deleted file mode 100644 index d6122d59aa32fd52e956bd36200ba79af4a17b17..0000000000000000000000000000000000000000 --- a/spaces/OAOA/DifFace/basicsr/ops/upfirdn2d/upfirdn2d.py +++ /dev/null @@ -1,192 +0,0 @@ -# modify from https://github.com/rosinality/stylegan2-pytorch/blob/master/op/upfirdn2d.py # noqa:E501 - -import os -import torch -from torch.autograd import Function -from torch.nn import functional as F - -BASICSR_JIT = os.getenv('BASICSR_JIT') -if BASICSR_JIT == 'True': - from torch.utils.cpp_extension import load - module_path = os.path.dirname(__file__) - upfirdn2d_ext = load( - 'upfirdn2d', - sources=[ - os.path.join(module_path, 'src', 'upfirdn2d.cpp'), - os.path.join(module_path, 'src', 'upfirdn2d_kernel.cu'), - ], - ) -else: - try: - from . import upfirdn2d_ext - except ImportError: - pass - # avoid annoying print output - # print(f'Cannot import deform_conv_ext. Error: {error}. You may need to: \n ' - # '1. compile with BASICSR_EXT=True. or\n ' - # '2. set BASICSR_JIT=True during running') - - -class UpFirDn2dBackward(Function): - - @staticmethod - def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): - - up_x, up_y = up - down_x, down_y = down - g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad - - grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) - - grad_input = upfirdn2d_ext.upfirdn2d( - grad_output, - grad_kernel, - down_x, - down_y, - up_x, - up_y, - g_pad_x0, - g_pad_x1, - g_pad_y0, - g_pad_y1, - ) - grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) - - ctx.save_for_backward(kernel) - - pad_x0, pad_x1, pad_y0, pad_y1 = pad - - ctx.up_x = up_x - ctx.up_y = up_y - ctx.down_x = down_x - ctx.down_y = down_y - ctx.pad_x0 = pad_x0 - ctx.pad_x1 = pad_x1 - ctx.pad_y0 = pad_y0 - ctx.pad_y1 = pad_y1 - ctx.in_size = in_size - ctx.out_size = out_size - - return grad_input - - @staticmethod - def backward(ctx, gradgrad_input): - kernel, = ctx.saved_tensors - - gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.in_size[3], 1) - - gradgrad_out = upfirdn2d_ext.upfirdn2d( - gradgrad_input, - kernel, - ctx.up_x, - ctx.up_y, - ctx.down_x, - ctx.down_y, - ctx.pad_x0, - ctx.pad_x1, - ctx.pad_y0, - ctx.pad_y1, - ) - # gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.out_size[0], - # ctx.out_size[1], ctx.in_size[3]) - gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]) - - return gradgrad_out, None, None, None, None, None, None, None, None - - -class UpFirDn2d(Function): - - @staticmethod - def forward(ctx, input, kernel, up, down, pad): - up_x, up_y = up - down_x, down_y = down - pad_x0, pad_x1, pad_y0, pad_y1 = pad - - kernel_h, kernel_w = kernel.shape - _, channel, in_h, in_w = input.shape - ctx.in_size = input.shape - - 
input = input.reshape(-1, in_h, in_w, 1) - - ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) - - out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 - out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 - ctx.out_size = (out_h, out_w) - - ctx.up = (up_x, up_y) - ctx.down = (down_x, down_y) - ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1) - - g_pad_x0 = kernel_w - pad_x0 - 1 - g_pad_y0 = kernel_h - pad_y0 - 1 - g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 - g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 - - ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) - - out = upfirdn2d_ext.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) - # out = out.view(major, out_h, out_w, minor) - out = out.view(-1, channel, out_h, out_w) - - return out - - @staticmethod - def backward(ctx, grad_output): - kernel, grad_kernel = ctx.saved_tensors - - grad_input = UpFirDn2dBackward.apply( - grad_output, - kernel, - grad_kernel, - ctx.up, - ctx.down, - ctx.pad, - ctx.g_pad, - ctx.in_size, - ctx.out_size, - ) - - return grad_input, None, None, None, None - - -def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): - if input.device.type == 'cpu': - out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1]) - else: - out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])) - - return out - - -def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1): - _, channel, in_h, in_w = input.shape - input = input.reshape(-1, in_h, in_w, 1) - - _, in_h, in_w, minor = input.shape - kernel_h, kernel_w = kernel.shape - - out = input.view(-1, in_h, 1, in_w, 1, minor) - out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) - out = out.view(-1, in_h * up_y, in_w * up_x, minor) - - out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]) - out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0), max(-pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :, ] - - out = out.permute(0, 3, 1, 2) - out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) - w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) - out = F.conv2d(out, w) - out = out.reshape( - -1, - minor, - in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, - in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1, - ) - out = out.permute(0, 2, 3, 1) - out = out[:, ::down_y, ::down_x, :] - - out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 - out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 - - return out.view(-1, channel, out_h, out_w) diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/modules/lightconv_layer/cuda_function_gen.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/modules/lightconv_layer/cuda_function_gen.py deleted file mode 100644 index a25433dd8edae2f0b52d7d0eeeb829cabc6b4b89..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/modules/lightconv_layer/cuda_function_gen.py +++ /dev/null @@ -1,289 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - - -def gen_forward(): - - kernels = [3, 5, 7, 15, 31, 63, 127, 255] - seqs = [32 * x for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]] - - head = """ -/** - * Copyright (c) Facebook, Inc. and its affiliates. 
- * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - */ - -#include "lightconv_cuda.cuh" - -std::vector lightconv_cuda_forward(at::Tensor input, at::Tensor filters, int padding_l) { - - at::DeviceGuard g(input.device()); - const auto minibatch = input.size(0); - const auto numFeatures = input.size(1); - const auto sequenceLength = input.size(2); - - const auto numHeads = filters.size(0); - const auto filterSize = filters.size(1); - - const auto numFiltersInBlock = numFeatures / numHeads; - - const dim3 blocks(minibatch, numFeatures); - - auto output = at::zeros_like(input); - auto stream = at::cuda::getCurrentCUDAStream(); -""" - - sequence_if = """ - if (sequenceLength <= {seq}) {{ - switch(filterSize) {{ -""" - - case_k = """ - case {k}: -""" - - main_block = """ - if (padding_l == {pad}) {{ - AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "lightconv_forward", ([&] {{ - lightconv_forward_kernel<{k}, {b_size}, {pad}, scalar_t> - <<>>( - input.data(), - filters.data(), - minibatch, - sequenceLength, - numFeatures, - numFiltersInBlock, - output.data()); - }})); - }} else -""" - - bad_padding = """ - { - std::cout << "WARNING: Unsupported padding size - skipping forward pass" << std::endl; - } - break; -""" - - bad_filter = """ - default: - std::cout << "WARNING: Unsupported filter length passed - skipping forward pass" << std::endl; - } -""" - - con_else = """ - } else -""" - - final_else = """ - { - switch(filterSize) { -""" - - final_return = """ - } - - return {output}; -} -""" - - with open("lightconv_cuda_forward.cu", "w") as forward: - forward.write(head) - for seq in seqs: - forward.write(sequence_if.format(seq=seq)) - for k in kernels: - forward.write(case_k.format(k=k)) - for pad in [k // 2, k - 1]: - forward.write(main_block.format(k=k, b_size=seq, pad=pad)) - forward.write(bad_padding) - forward.write(bad_filter) - forward.write(con_else) - - forward.write(final_else) - for k in kernels: - forward.write(case_k.format(k=k)) - for pad in [k // 2, k - 1]: - forward.write(main_block.format(k=k, b_size=seq, pad=pad)) - forward.write(bad_padding) - forward.write(bad_filter) - forward.write(final_return) - - -def gen_backward(): - - head = """ -/** - * Copyright (c) Facebook, Inc. and its affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. 
- */ - -#include "lightconv_cuda.cuh" - -std::vector lightconv_cuda_backward( - at::Tensor gradOutput, - int padding_l, - at::Tensor input, - at::Tensor filters) { - - // gradWrtInput - const int minibatch = input.size(0); - const int numFeatures = input.size(1); - const int sequenceLength = input.size(2); - - const int numHeads = filters.size(0); - const int filterSize = filters.size(1); - - const dim3 gradBlocks(minibatch, numFeatures); - const dim3 weightGradFirstpassShortBlocks(minibatch, numHeads); - const dim3 weightGradSecondpassBlocks(numHeads, filterSize); - - const int numFiltersInBlock = numFeatures / numHeads; - - auto gradInput = at::zeros_like(input); - auto gradFilters = at::zeros_like(filters); - - at::DeviceGuard g(input.device()); - auto stream = at::cuda::getCurrentCUDAStream(); - - switch(filterSize) { -""" - - sequence_if = """ - if (sequenceLength <= {seq}) {{ -""" - - case_k = """ - case {k}: -""" - - main_block = """ - if (padding_l == {p}) {{ - AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "lightconv_backward", ([&] {{ - lightconv_grad_wrt_input_kernel<{k}, {b_size}, {p}, scalar_t> - <<>>( - gradOutput.data(), - filters.data(), - minibatch, - sequenceLength, - numFeatures, - numFiltersInBlock, - gradInput.data()); - -""" - - weight_grad_short = """ - at::Tensor tempSumGradFilters = at::zeros({{minibatch, numHeads, filterSize}}, input.options().dtype(at::kFloat)); - lightconv_grad_wrt_weights_firstpass_short_kernel<{k}, {b_size}, {p}, scalar_t> - <<>>( - input.data(), - gradOutput.data(), - minibatch, - sequenceLength, - numFeatures, - numFiltersInBlock, - numHeads, - tempSumGradFilters.data() - ); - - lightconv_grad_wrt_weights_secondpass_short_kernel<{k}, {b_size}, scalar_t> - <<>>( - tempSumGradFilters.data(), - minibatch, - numFiltersInBlock, - gradFilters.data() - ); - }})); - }} else -""" - - weight_grad = """ - at::Tensor tempSumGradFilters = at::zeros({{minibatch, numFeatures, filterSize}}, input.options().dtype(at::kFloat)); - lightconv_grad_wrt_weights_firstpass_kernel<{k}, {b_size}, {p}, scalar_t> - <<>>( - input.data(), - gradOutput.data(), - minibatch, - sequenceLength, - numFeatures, - numFiltersInBlock, - tempSumGradFilters.data() - ); - - lightconv_grad_wrt_weights_secondpass_kernel<{k}, {b_size}, scalar_t> - <<>>( - tempSumGradFilters.data(), - minibatch, - numFiltersInBlock, - gradFilters.data() - ); - }})); - }} else -""" - - bad_padding = """ - { - std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl; - } -""" - - breakout = """ - break; -""" - - bad_filter = """ - default: - std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl; -""" - - con_else = """ - } else -""" - - final_else = """ - { - switch(filterSize) { -""" - - last_return = """ - } - return {gradInput, gradFilters}; -} -""" - - kernels = [3, 5, 7, 15, 31, 63, 127, 255] - seqs = [32 * x for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]] - thresh = [32, 32, 64, 128, 256, -1, -1, -1] - max_mem = [-1, -1, -1, -1, -1, 192, 96, 64] - - with open("lightconv_cuda_backward.cu", "w") as backward: - backward.write(head) - for (k, t, mem) in zip(kernels, thresh, max_mem): - backward.write(case_k.format(k=k)) - for seq in seqs: - if (t == -1 or seq <= t) and (mem == -1 or seq < mem): - backward.write(sequence_if.format(seq=seq)) - for p in [k // 2, k - 1]: - backward.write(main_block.format(k=k, b_size=seq, p=p)) - backward.write(weight_grad_short.format(k=k, b_size=seq, p=p)) - 
backward.write(bad_padding) - else: - for p in [k // 2, k - 1]: - backward.write(main_block.format(k=k, b_size=32, p=p)) - backward.write(weight_grad.format(k=k, b_size=32, p=p)) - backward.write(bad_padding) - backward.write(breakout) - break - backward.write(con_else) - backward.write(bad_filter) - backward.write(last_return) - - -if __name__ == "__main__": - gen_forward() - gen_backward() diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/CODE_OF_CONDUCT.md b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/CODE_OF_CONDUCT.md deleted file mode 100644 index a0cbeaab7650bf08267fbdbc9bb54e845c88f392..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,77 +0,0 @@ -# Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to make participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, sex characteristics, gender identity and expression, -level of experience, education, socio-economic status, nationality, personal -appearance, race, religion, or sexual identity and orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or - advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies within all project spaces, and it also applies when -an individual is representing the project or its community in public spaces. -Examples of representing a project or community include using an official -project e-mail address, posting via an official social media account, or acting -as an appointed representative at an online or offline event. Representation of -a project may be further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at . All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. 
-Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html - -[homepage]: https://www.contributor-covenant.org - -For answers to common questions about this code of conduct, see -https://www.contributor-covenant.org/faq - diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/criss/README.md b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/criss/README.md deleted file mode 100644 index 4689ed7c10497a5100b28fe6d6801a7c089da569..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/criss/README.md +++ /dev/null @@ -1,61 +0,0 @@ -# Cross-lingual Retrieval for Iterative Self-Supervised Training - -https://arxiv.org/pdf/2006.09526.pdf - -## Introduction - -CRISS is a multilingual sequence-to-sequnce pretraining method where mining and training processes are applied iteratively, improving cross-lingual alignment and translation ability at the same time. - -## Requirements: - -* faiss: https://github.com/facebookresearch/faiss -* mosesdecoder: https://github.com/moses-smt/mosesdecoder -* flores: https://github.com/facebookresearch/flores -* LASER: https://github.com/facebookresearch/LASER - -## Unsupervised Machine Translation -##### 1. Download and decompress CRISS checkpoints -``` -cd examples/criss -wget https://dl.fbaipublicfiles.com/criss/criss_3rd_checkpoints.tar.gz -tar -xf criss_checkpoints.tar.gz -``` -##### 2. Download and preprocess Flores test dataset -Make sure to run all scripts from examples/criss directory -``` -bash download_and_preprocess_flores_test.sh -``` - -##### 3. Run Evaluation on Sinhala-English -``` -bash unsupervised_mt/eval.sh -``` - -## Sentence Retrieval -##### 1. Download and preprocess Tatoeba dataset -``` -bash download_and_preprocess_tatoeba.sh -``` - -##### 2. Run Sentence Retrieval on Tatoeba Kazakh-English -``` -bash sentence_retrieval/sentence_retrieval_tatoeba.sh -``` - -## Mining -##### 1. Install faiss -Follow instructions on https://github.com/facebookresearch/faiss/blob/master/INSTALL.md -##### 2. Mine pseudo-parallel data between Kazakh and English -``` -bash mining/mine_example.sh -``` - -## Citation -```bibtex -@article{tran2020cross, - title={Cross-lingual retrieval for iterative self-supervised training}, - author={Tran, Chau and Tang, Yuqing and Li, Xian and Gu, Jiatao}, - journal={arXiv preprint arXiv:2006.09526}, - year={2020} -} -``` diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/model_parallel/models/roberta/__init__.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/model_parallel/models/roberta/__init__.py deleted file mode 100644 index 117827c3e9c176477f33e3a6fd7fe19a922411a2..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/model_parallel/models/roberta/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -from .model import * # noqa diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/megatron_11b/README.md b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/megatron_11b/README.md deleted file mode 100644 index 945c96c91e2e2d93466abc28d90bc25a1e7dd471..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/megatron_11b/README.md +++ /dev/null @@ -1,161 +0,0 @@ -# Megatron-11b - -Megatron-11b is a unidirectional language model with `11B` parameters based on [Megatron-LM](https://arxiv.org/pdf/1909.08053.pdf). Following the original Megatron work, we trained the model using intra-layer model parallelism with each layer's parameters split across 8 GPUs. - -Megatron-11b is trained on the same data and uses the same byte-pair encoding (BPE) as [RoBERTa](https://arxiv.org/pdf/1907.11692.pdf). - -## Pre-trained models - -Model | Description | # params | # filesize | Download ----|---|---|---|--- -`megatron_11b` | megatron_11b unidirectional language model | 11B | 19Gb | [megatron_11b.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/model_parallel/megatron_11b.tar.gz) - -#### Architecture: - -Param | Value ----|--- -embed_dim | 3072 -ffn_dim | 3072 * 6 -layers | 72 -attention heads | 32 - -#### Training details: - -Param | value ----|--- -bsz | 512 -num_updates | 300,000 -peak_lr | 1.5e-04 -lr scheduler | inverse_sqrt -clip norm | 0.0 - - -## Example training command (model parallel) - -Megatron-11b contains too many parameters to train on a single GPU. Following -the original Megatron work, we adopt an intra-layer model parallel training -approach in which each layer's parameters are split across multiple GPUs and -activations and gradients are communicated during the forward/backward pass, -respectively. We similarly split the loss computation using the -`vocab_parallel_cross_entropy` criterion. - -The following training command illustrates how to do model parallel training in -fairseq. We assume that each machine (node) has 8 GPUs among which to split the -model parameters (`--model-parallel-size 8`). If you have access to multiple -nodes, you may combine this with data parallel training by increasing -`--distributed-world-size`. - -To train Megatron-11b on a single node: - - -```bash -fairseq-train \ - --distributed-world-size 8 \ - --memory-efficient-fp16 \ - --num-workers 2 \ - --model-parallel-size 8 \ - --criterion vocab_parallel_cross_entropy \ - --task language_modeling \ - --sample-break-mode none \ - --tokens-per-sample 1024 \ - --arch transformer_lm_megatron_11b \ - --share-decoder-input-output-embed \ - --optimizer adam --adam-betas "(0.9, 0.98)" --adam-eps 1e-08 --clip-norm 0.0 \ - --lr-scheduler inverse_sqrt --lr 0.00015 \ - --warmup-updates 3000 --weight-decay 0.01 \ - --dropout 0.1 --attention-dropout 0.1 \ - --batch-size 2 \ - --max-update 300000; -``` - -Note: Above was tested on `DGX-1` box, with `8xV100-32Gb` GPUs. - -## Results - -**[Wikitext103](https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/)** - -Model | Valid perplexity | Test perplexity ----|---|--- -`megatron_11b` | 10.64 | 10.54 - - -## Evaluating `megatron_11b` on Wikitext-103 - -#### 1. Downloading Megatron-11b -```bash -# WARNING: this file is 19GB -wget https://dl.fbaipublicfiles.com/fairseq/models/model_parallel/megatron_11b.tar.gz -tar -xzvf megatron_11b.tar.gz -``` - -#### 2. Download Wikitext-103 -```bash -wget https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip -unzip wikitext-103-raw-v1.zip -``` - -#### 3. 
Detokenize test tokens -Megatron-11b uses a byte-level BPE that expects raw (untokenized) input. Since -the wikitext-103 dataset comes tokenized, we apply a simple detokenization -process to restore the untokenized test set: - -```bash -python -m examples.megatron_11b.detok wikitext-103-raw/wiki.test.raw > wikitext-103-raw/wiki.test.detok -``` - -#### 4. BPE encoding -```bash -wget -N 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/encoder.json' -wget -N 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/vocab.bpe' - -python -m examples.roberta.multiprocessing_bpe_encoder \ - --encoder-json encoder.json \ - --vocab-bpe vocab.bpe \ - --inputs "wikitext-103-raw/wiki.test.detok" \ - --outputs "wikitext-103-raw/wiki.test.bpe" \ - --workers 60; -``` - -#### 5. Fairseq binarize -```bash -fairseq-preprocess \ - --only-source \ - --testpref wikitext-103-raw/wiki.test.bpe \ - --srcdict megatron_11b/dict.txt \ - --destdir wikitext103-bin; -``` - -#### 6. Evaluating perplexity. -We can now evaluate perplexity on the test set. Note that because we've modified -the test set (via detokenization and BPE), the perplexity reported by -`fairseq-eval-lm` needs to be renormalized. - -Compute unnormalized perplexity: - -```bash -DATA_PATH=wikitext103-bin/ -fairseq-eval-lm \ - $DATA_PATH \ - --path megatron_11b/model.pt \ - --task language_modeling \ - --gen-subset test \ - --batch-size 8 \ - --criterion cross_entropy \ - --context-window 992 \ - --distributed-world-size 8 \ - --model-parallel-size 8; -# Expected PPL (unnormalized_ppl): [8.46] -# Note: the eval command needs to run on 8 GPUs for the released model -``` -Renormalizing formula: `2 ^ ( log_2(unnormalized_PPL) * (270847 / 245566))`. -PPL After normalization: `10.54` - -To renormalize the perplexity, we must account for the change in token count -after detokenizing and appling BPE. The formula for this is: -`2 ^ ( log_2(unnormalized_PPL) * (new_token_cnt / orig_token_cnt))` - -For the wikitext-103 test set, the original token count is `245566` and the -token count after detokenization and applying BPE is `270847`. - -The perplexity after renormalization is: -`2 ^ ( log_2(8.46) * (270847 / 245566)) = 10.54` diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/translation/prepare-iwslt14.sh b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/translation/prepare-iwslt14.sh deleted file mode 100644 index 2fb6643fbccb58701dcbb77d91430e68a821ba38..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/translation/prepare-iwslt14.sh +++ /dev/null @@ -1,115 +0,0 @@ -#!/usr/bin/env bash -# -# Adapted from https://github.com/facebookresearch/MIXER/blob/master/prepareData.sh - -echo 'Cloning Moses github repository (for tokenization scripts)...' -git clone https://github.com/moses-smt/mosesdecoder.git - -echo 'Cloning Subword NMT repository (for BPE pre-processing)...' -git clone https://github.com/rsennrich/subword-nmt.git - -SCRIPTS=mosesdecoder/scripts -TOKENIZER=$SCRIPTS/tokenizer/tokenizer.perl -LC=$SCRIPTS/tokenizer/lowercase.perl -CLEAN=$SCRIPTS/training/clean-corpus-n.perl -BPEROOT=subword-nmt/subword_nmt -BPE_TOKENS=10000 - -URL="http://dl.fbaipublicfiles.com/fairseq/data/iwslt14/de-en.tgz" -GZ=de-en.tgz - -if [ ! -d "$SCRIPTS" ]; then - echo "Please set SCRIPTS variable correctly to point to Moses scripts." - exit -fi - -src=de -tgt=en -lang=de-en -prep=iwslt14.tokenized.de-en -tmp=$prep/tmp -orig=orig - -mkdir -p $orig $tmp $prep - -echo "Downloading data from ${URL}..." 
-cd $orig
-wget "$URL"
-
-if [ -f $GZ ]; then
-    echo "Data successfully downloaded."
-else
-    echo "Data not successfully downloaded."
-    exit
-fi
-
-tar zxvf $GZ
-cd ..
-
-echo "pre-processing train data..."
-for l in $src $tgt; do
-    f=train.tags.$lang.$l
-    tok=train.tags.$lang.tok.$l
-
-    cat $orig/$lang/$f | \
-    grep -v '<url>' | \
-    grep -v '<talkid>' | \
-    grep -v '<keywords>' | \
-    sed -e 's/<title>//g' | \
-    sed -e 's/<\/title>//g' | \
-    sed -e 's/<description>//g' | \
-    sed -e 's/<\/description>//g' | \
-    perl $TOKENIZER -threads 8 -l $l > $tmp/$tok
-    echo ""
-done
-perl $CLEAN -ratio 1.5 $tmp/train.tags.$lang.tok $src $tgt $tmp/train.tags.$lang.clean 1 175
-for l in $src $tgt; do
-    perl $LC < $tmp/train.tags.$lang.clean.$l > $tmp/train.tags.$lang.$l
-done
-
-echo "pre-processing valid/test data..."
-for l in $src $tgt; do
-    for o in `ls $orig/$lang/IWSLT14.TED*.$l.xml`; do
-    fname=${o##*/}
-    f=$tmp/${fname%.*}
-    echo $o $f
-    grep '<seg id' $o | \
-        sed -e 's/<seg id="[0-9]*">\s*//g' | \
-        sed -e 's/\s*<\/seg>\s*//g' | \
-        sed -e "s/\’/\'/g" | \
-    perl $TOKENIZER -threads 8 -l $l | \
-    perl $LC > $f
-    echo ""
-    done
-done
-
-
-echo "creating train, valid, test..."
-for l in $src $tgt; do
-    awk '{if (NR%23 == 0) print $0; }' $tmp/train.tags.de-en.$l > $tmp/valid.$l
-    awk '{if (NR%23 != 0) print $0; }' $tmp/train.tags.de-en.$l > $tmp/train.$l
-
-    cat $tmp/IWSLT14.TED.dev2010.de-en.$l \
-        $tmp/IWSLT14.TEDX.dev2012.de-en.$l \
-        $tmp/IWSLT14.TED.tst2010.de-en.$l \
-        $tmp/IWSLT14.TED.tst2011.de-en.$l \
-        $tmp/IWSLT14.TED.tst2012.de-en.$l \
-        > $tmp/test.$l
-done
-
-TRAIN=$tmp/train.en-de
-BPE_CODE=$prep/code
-rm -f $TRAIN
-for l in $src $tgt; do
-    cat $tmp/train.$l >> $TRAIN
-done
-
-echo "learn_bpe.py on ${TRAIN}..."
-python $BPEROOT/learn_bpe.py -s $BPE_TOKENS < $TRAIN > $BPE_CODE
-
-for L in $src $tgt; do
-    for f in train.$L valid.$L test.$L; do
-        echo "apply_bpe.py to ${f}..."
-        python $BPEROOT/apply_bpe.py -c $BPE_CODE < $tmp/$f > $prep/$f
-    done
-done
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/benchmark/dummy_mt.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/benchmark/dummy_mt.py
deleted file mode 100644
index 4ca7be93a38d8d2b47685b74b4f8b8f9dcb03d2e..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/benchmark/dummy_mt.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import logging
-
-import numpy as np
-import torch
-from fairseq.data import Dictionary, FairseqDataset
-from fairseq.tasks import LegacyFairseqTask, register_task
-
-
-logger = logging.getLogger(__name__)
-
-
-@register_task("dummy_mt")
-class DummyMTTask(LegacyFairseqTask):
-    @staticmethod
-    def add_args(parser):
-        """Add task-specific arguments to the parser."""
-        parser.add_argument("--dict-size", default=49996, type=int)
-        parser.add_argument("--dataset-size", default=100000, type=int)
-        parser.add_argument("--src-len", default=30, type=int)
-        parser.add_argument("--tgt-len", default=30, type=int)
-
-    def __init__(self, args, dictionary):
-        super().__init__(args)
-        self.dictionary = dictionary
-        self.seed = args.seed
-
-        dictionary.pad_to_multiple_(8)  # often faster if divisible by 8
-
-        self.dummy_src = torch.arange(args.src_len + 1) + dictionary.pad() + 1
-        self.dummy_tgt = torch.arange(args.tgt_len + 1) + dictionary.pad() + 1
-
-    @classmethod
-    def setup_task(cls, args, **kwargs):
-        """Setup the task. 
""" - dictionary = Dictionary() - for i in range(args.dict_size): - dictionary.add_symbol("word{}".format(i)) - logger.info("dictionary: {} types".format(len(dictionary))) - - args.max_source_positions = args.src_len + dictionary.pad() + 2 - args.max_target_positions = args.tgt_len + dictionary.pad() + 2 - - return cls(args, dictionary) - - def load_dataset(self, split, epoch=1, combine=False, **kwargs): - """Load a given dataset split. - Args: - split (str): name of the split (e.g., train, valid, test) - """ - item_size = max(self.args.src_len, self.args.tgt_len) - if self.args.batch_size is not None: - bsz = self.args.batch_size - else: - bsz = max(1, self.args.max_tokens // item_size) - tgt = torch.stack([self.dummy_tgt for _ in range(bsz)]) - self.datasets[split] = DummyDataset( - { - "id": 1, - "net_input": { - "src_tokens": torch.stack([self.dummy_src for _ in range(bsz)]), - "src_lengths": torch.full( - (bsz,), self.args.src_len, dtype=torch.long - ), - "prev_output_tokens": tgt.clone(), - }, - "target": tgt, - "nsentences": bsz, - "ntokens": bsz * self.args.tgt_len, - }, - num_items=self.args.dataset_size, - item_size=item_size, - ) - - @property - def source_dictionary(self): - return self.dictionary - - @property - def target_dictionary(self): - return self.dictionary - - -class DummyDataset(FairseqDataset): - def __init__(self, batch, num_items, item_size): - super().__init__() - self.batch = batch - self.num_items = num_items - self.item_size = item_size - - def __getitem__(self, index): - return index - - def __len__(self): - return self.num_items - - def collater(self, samples): - return self.batch - - @property - def sizes(self): - return np.array([self.item_size] * self.num_items) - - def num_tokens(self, index): - return self.item_size - - def size(self, index): - return self.item_size - - def ordered_indices(self): - return np.arange(self.num_items) - - @property - def supports_prefetch(self): - return False diff --git a/spaces/ORI-Muchim/BlueArchiveTTS/app.py b/spaces/ORI-Muchim/BlueArchiveTTS/app.py deleted file mode 100644 index 645c771428236a9b99a0e9a663fbe2affd25571f..0000000000000000000000000000000000000000 --- a/spaces/ORI-Muchim/BlueArchiveTTS/app.py +++ /dev/null @@ -1,164 +0,0 @@ -import json -import os -import re - -import librosa -import numpy as np -import torch -from torch import no_grad, LongTensor -import commons -import utils -import gradio as gr -from models import SynthesizerTrn -from text import text_to_sequence, _clean_text -from mel_processing import spectrogram_torch - -limitation = os.getenv("SYSTEM") == "spaces" # limit text and audio length in huggingface spaces - - -def get_text(text, hps, is_phoneme): - text_norm = text_to_sequence(text, hps.symbols, [] if is_phoneme else hps.data.text_cleaners) - if hps.data.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = LongTensor(text_norm) - return text_norm - - -def create_tts_fn(model, hps, speaker_ids): - def tts_fn(text, speaker, speed, is_phoneme): - if limitation: - text_len = len(text) - max_len = 500 - if is_phoneme: - max_len *= 3 - else: - if len(hps.data.text_cleaners) > 0 and hps.data.text_cleaners[0] == "zh_ja_mixture_cleaners": - text_len = len(re.sub("(\[ZH\]|\[JA\])", "", text)) - if text_len > max_len: - return "Error: Text is too long", None - - speaker_id = speaker_ids[speaker] - stn_tst = get_text(text, hps, is_phoneme) - with no_grad(): - x_tst = stn_tst.unsqueeze(0) - x_tst_lengths = LongTensor([stn_tst.size(0)]) - sid = LongTensor([speaker_id]) - audio = 
model.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=.667, noise_scale_w=0.8, - length_scale=1.0 / speed)[0][0, 0].data.cpu().float().numpy() - del stn_tst, x_tst, x_tst_lengths, sid - return "Success", (hps.data.sampling_rate, audio) - - return tts_fn - - - - - -def create_to_phoneme_fn(hps): - def to_phoneme_fn(text): - return _clean_text(text, hps.data.text_cleaners) if text != "" else "" - - return to_phoneme_fn - - -css = """ - #advanced-btn { - color: white; - border-color: black; - background: black; - font-size: .7rem !important; - line-height: 19px; - margin-top: 24px; - margin-bottom: 12px; - padding: 2px 8px; - border-radius: 14px !important; - } - #advanced-options { - display: none; - margin-bottom: 20px; - } -""" - -if __name__ == '__main__': - models_tts = [] - models_vc = [] - models_soft_vc = [] - name = 'BlueArchiveTTS' - lang = '日本語 (Japanese)' - example = '先生、何をお手伝いしましょうか?' - config_path = f"saved_model/config.json" - model_path = f"saved_model/model.pth" - cover_path = f"saved_model/cover.png" - hps = utils.get_hparams_from_file(config_path) - model = SynthesizerTrn( - len(hps.symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - **hps.model) - utils.load_checkpoint(model_path, model, None) - model.eval() - speaker_ids = [sid for sid, name in enumerate(hps.speakers) if name != "None"] - speakers = [name for sid, name in enumerate(hps.speakers) if name != "None"] - - t = 'vits' - models_tts.append((name, cover_path, speakers, lang, example, - hps.symbols, create_tts_fn(model, hps, speaker_ids), - create_to_phoneme_fn(hps))) - - - app = gr.Blocks(css=css) - - with app: - gr.Markdown("# BlueArchiveTTS Using VITS Model\n\n" - "![visitor badge](https://visitor-badge.glitch.me/badge?page_id=ORI-Muchim.BlueArchiveTTS)\n\n") - with gr.Tabs(): - with gr.TabItem("TTS"): - with gr.Tabs(): - for i, (name, cover_path, speakers, lang, example, symbols, tts_fn, - to_phoneme_fn) in enumerate(models_tts): - with gr.TabItem(f"BlueArchive"): - with gr.Column(): - gr.Markdown(f"## {name}\n\n" - f"![cover](file/{cover_path})\n\n" - f"lang: {lang}") - tts_input1 = gr.TextArea(label="Text (500 words limitation)", value=example, - elem_id=f"tts-input{i}") - tts_input2 = gr.Dropdown(label="Speaker", choices=speakers, - type="index", value=speakers[0]) - tts_input3 = gr.Slider(label="Speed", value=1, minimum=0.1, maximum=2, step=0.1) - with gr.Accordion(label="Advanced Options", open=False): - phoneme_input = gr.Checkbox(value=False, label="Phoneme input") - to_phoneme_btn = gr.Button("Covert text to phoneme") - phoneme_list = gr.Dataset(label="Phoneme list", components=[tts_input1], - samples=[[x] for x in symbols], - elem_id=f"phoneme-list{i}") - phoneme_list_json = gr.Json(value=symbols, visible=False) - tts_submit = gr.Button("Generate", variant="primary") - tts_output1 = gr.Textbox(label="Output Message") - tts_output2 = gr.Audio(label="Output Audio") - tts_submit.click(tts_fn, [tts_input1, tts_input2, tts_input3, phoneme_input], - [tts_output1, tts_output2]) - to_phoneme_btn.click(to_phoneme_fn, [tts_input1], [tts_input1]) - phoneme_list.click(None, [phoneme_list, phoneme_list_json], [], - _js=f""" - (i,phonemes) => {{ - let root = document.querySelector("body > gradio-app"); - if (root.shadowRoot != null) - root = root.shadowRoot; - let text_input = root.querySelector("#tts-input{i}").querySelector("textarea"); - let startPos = text_input.selectionStart; - let endPos = text_input.selectionEnd; - let oldTxt = 
text_input.value; - let result = oldTxt.substring(0, startPos) + phonemes[i] + oldTxt.substring(endPos); - text_input.value = result; - let x = window.scrollX, y = window.scrollY; - text_input.focus(); - text_input.selectionStart = startPos + phonemes[i].length; - text_input.selectionEnd = startPos + phonemes[i].length; - text_input.blur(); - window.scrollTo(x, y); - return []; - }}""") - - app.queue(concurrency_count=3).launch(show_api=False) diff --git a/spaces/OpenGVLab/InternGPT/third-party/lama/saicinpainting/training/modules/ffc.py b/spaces/OpenGVLab/InternGPT/third-party/lama/saicinpainting/training/modules/ffc.py deleted file mode 100644 index 2f8aeb1411fc1537916275fd3243706cc74b8d3c..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/third-party/lama/saicinpainting/training/modules/ffc.py +++ /dev/null @@ -1,433 +0,0 @@ -# Fast Fourier Convolution NeurIPS 2020 -# original implementation https://github.com/pkumivision/FFC/blob/main/model_zoo/ffc.py -# paper https://proceedings.neurips.cc/paper/2020/file/2fd5d41ec6cfab47e32164d5624269b1-Paper.pdf - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F - -from saicinpainting.training.modules.base import get_activation, BaseDiscriminator -from saicinpainting.training.modules.spatial_transform import LearnableSpatialTransformWrapper -from saicinpainting.training.modules.squeeze_excitation import SELayer -from saicinpainting.utils import get_shape - - -class FFCSE_block(nn.Module): - - def __init__(self, channels, ratio_g): - super(FFCSE_block, self).__init__() - in_cg = int(channels * ratio_g) - in_cl = channels - in_cg - r = 16 - - self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) - self.conv1 = nn.Conv2d(channels, channels // r, - kernel_size=1, bias=True) - self.relu1 = nn.ReLU(inplace=True) - self.conv_a2l = None if in_cl == 0 else nn.Conv2d( - channels // r, in_cl, kernel_size=1, bias=True) - self.conv_a2g = None if in_cg == 0 else nn.Conv2d( - channels // r, in_cg, kernel_size=1, bias=True) - self.sigmoid = nn.Sigmoid() - - def forward(self, x): - x = x if type(x) is tuple else (x, 0) - id_l, id_g = x - - x = id_l if type(id_g) is int else torch.cat([id_l, id_g], dim=1) - x = self.avgpool(x) - x = self.relu1(self.conv1(x)) - - x_l = 0 if self.conv_a2l is None else id_l * \ - self.sigmoid(self.conv_a2l(x)) - x_g = 0 if self.conv_a2g is None else id_g * \ - self.sigmoid(self.conv_a2g(x)) - return x_l, x_g - - -class FourierUnit(nn.Module): - - def __init__(self, in_channels, out_channels, groups=1, spatial_scale_factor=None, spatial_scale_mode='bilinear', - spectral_pos_encoding=False, use_se=False, se_kwargs=None, ffc3d=False, fft_norm='ortho'): - # bn_layer not used - super(FourierUnit, self).__init__() - self.groups = groups - - self.conv_layer = torch.nn.Conv2d(in_channels=in_channels * 2 + (2 if spectral_pos_encoding else 0), - out_channels=out_channels * 2, - kernel_size=1, stride=1, padding=0, groups=self.groups, bias=False) - self.bn = torch.nn.BatchNorm2d(out_channels * 2) - self.relu = torch.nn.ReLU(inplace=True) - - # squeeze and excitation block - self.use_se = use_se - if use_se: - if se_kwargs is None: - se_kwargs = {} - self.se = SELayer(self.conv_layer.in_channels, **se_kwargs) - - self.spatial_scale_factor = spatial_scale_factor - self.spatial_scale_mode = spatial_scale_mode - self.spectral_pos_encoding = spectral_pos_encoding - self.ffc3d = ffc3d - self.fft_norm = fft_norm - - def forward(self, x): - batch = x.shape[0] - - if self.spatial_scale_factor is not 
None: - orig_size = x.shape[-2:] - x = F.interpolate(x, scale_factor=self.spatial_scale_factor, mode=self.spatial_scale_mode, align_corners=False) - - r_size = x.size() - # (batch, c, h, w/2+1, 2) - fft_dim = (-3, -2, -1) if self.ffc3d else (-2, -1) - ffted = torch.fft.rfftn(x, dim=fft_dim, norm=self.fft_norm) - ffted = torch.stack((ffted.real, ffted.imag), dim=-1) - ffted = ffted.permute(0, 1, 4, 2, 3).contiguous() # (batch, c, 2, h, w/2+1) - ffted = ffted.view((batch, -1,) + ffted.size()[3:]) - - if self.spectral_pos_encoding: - height, width = ffted.shape[-2:] - coords_vert = torch.linspace(0, 1, height)[None, None, :, None].expand(batch, 1, height, width).to(ffted) - coords_hor = torch.linspace(0, 1, width)[None, None, None, :].expand(batch, 1, height, width).to(ffted) - ffted = torch.cat((coords_vert, coords_hor, ffted), dim=1) - - if self.use_se: - ffted = self.se(ffted) - - ffted = self.conv_layer(ffted) # (batch, c*2, h, w/2+1) - ffted = self.relu(self.bn(ffted)) - - ffted = ffted.view((batch, -1, 2,) + ffted.size()[2:]).permute( - 0, 1, 3, 4, 2).contiguous() # (batch,c, t, h, w/2+1, 2) - ffted = torch.complex(ffted[..., 0], ffted[..., 1]) - - ifft_shape_slice = x.shape[-3:] if self.ffc3d else x.shape[-2:] - output = torch.fft.irfftn(ffted, s=ifft_shape_slice, dim=fft_dim, norm=self.fft_norm) - - if self.spatial_scale_factor is not None: - output = F.interpolate(output, size=orig_size, mode=self.spatial_scale_mode, align_corners=False) - - return output - - -class SpectralTransform(nn.Module): - - def __init__(self, in_channels, out_channels, stride=1, groups=1, enable_lfu=True, **fu_kwargs): - # bn_layer not used - super(SpectralTransform, self).__init__() - self.enable_lfu = enable_lfu - if stride == 2: - self.downsample = nn.AvgPool2d(kernel_size=(2, 2), stride=2) - else: - self.downsample = nn.Identity() - - self.stride = stride - self.conv1 = nn.Sequential( - nn.Conv2d(in_channels, out_channels // - 2, kernel_size=1, groups=groups, bias=False), - nn.BatchNorm2d(out_channels // 2), - nn.ReLU(inplace=True) - ) - self.fu = FourierUnit( - out_channels // 2, out_channels // 2, groups, **fu_kwargs) - if self.enable_lfu: - self.lfu = FourierUnit( - out_channels // 2, out_channels // 2, groups) - self.conv2 = torch.nn.Conv2d( - out_channels // 2, out_channels, kernel_size=1, groups=groups, bias=False) - - def forward(self, x): - - x = self.downsample(x) - x = self.conv1(x) - output = self.fu(x) - - if self.enable_lfu: - n, c, h, w = x.shape - split_no = 2 - split_s = h // split_no - xs = torch.cat(torch.split( - x[:, :c // 4], split_s, dim=-2), dim=1).contiguous() - xs = torch.cat(torch.split(xs, split_s, dim=-1), - dim=1).contiguous() - xs = self.lfu(xs) - xs = xs.repeat(1, 1, split_no, split_no).contiguous() - else: - xs = 0 - - output = self.conv2(x + output + xs) - - return output - - -class FFC(nn.Module): - - def __init__(self, in_channels, out_channels, kernel_size, - ratio_gin, ratio_gout, stride=1, padding=0, - dilation=1, groups=1, bias=False, enable_lfu=True, - padding_type='reflect', gated=False, **spectral_kwargs): - super(FFC, self).__init__() - - assert stride == 1 or stride == 2, "Stride should be 1 or 2." 
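# FFC keeps two parallel streams: a "local" branch handled by ordinary
# convolutions and a "global" branch handled by the SpectralTransform
# (Fourier-domain) unit. ratio_gin / ratio_gout below decide how many of the
# input / output channels are routed to the global branch, and the four convs
# (l2l, l2g, g2l, g2g) exchange information between the two streams.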
- self.stride = stride - - in_cg = int(in_channels * ratio_gin) - in_cl = in_channels - in_cg - out_cg = int(out_channels * ratio_gout) - out_cl = out_channels - out_cg - #groups_g = 1 if groups == 1 else int(groups * ratio_gout) - #groups_l = 1 if groups == 1 else groups - groups_g - - self.ratio_gin = ratio_gin - self.ratio_gout = ratio_gout - self.global_in_num = in_cg - - module = nn.Identity if in_cl == 0 or out_cl == 0 else nn.Conv2d - self.convl2l = module(in_cl, out_cl, kernel_size, - stride, padding, dilation, groups, bias, padding_mode=padding_type) - module = nn.Identity if in_cl == 0 or out_cg == 0 else nn.Conv2d - self.convl2g = module(in_cl, out_cg, kernel_size, - stride, padding, dilation, groups, bias, padding_mode=padding_type) - module = nn.Identity if in_cg == 0 or out_cl == 0 else nn.Conv2d - self.convg2l = module(in_cg, out_cl, kernel_size, - stride, padding, dilation, groups, bias, padding_mode=padding_type) - module = nn.Identity if in_cg == 0 or out_cg == 0 else SpectralTransform - self.convg2g = module( - in_cg, out_cg, stride, 1 if groups == 1 else groups // 2, enable_lfu, **spectral_kwargs) - - self.gated = gated - module = nn.Identity if in_cg == 0 or out_cl == 0 or not self.gated else nn.Conv2d - self.gate = module(in_channels, 2, 1) - - def forward(self, x): - x_l, x_g = x if type(x) is tuple else (x, 0) - out_xl, out_xg = 0, 0 - - if self.gated: - total_input_parts = [x_l] - if torch.is_tensor(x_g): - total_input_parts.append(x_g) - total_input = torch.cat(total_input_parts, dim=1) - - gates = torch.sigmoid(self.gate(total_input)) - g2l_gate, l2g_gate = gates.chunk(2, dim=1) - else: - g2l_gate, l2g_gate = 1, 1 - - if self.ratio_gout != 1: - out_xl = self.convl2l(x_l) + self.convg2l(x_g) * g2l_gate - if self.ratio_gout != 0: - out_xg = self.convl2g(x_l) * l2g_gate + self.convg2g(x_g) - - return out_xl, out_xg - - -class FFC_BN_ACT(nn.Module): - - def __init__(self, in_channels, out_channels, - kernel_size, ratio_gin, ratio_gout, - stride=1, padding=0, dilation=1, groups=1, bias=False, - norm_layer=nn.BatchNorm2d, activation_layer=nn.Identity, - padding_type='reflect', - enable_lfu=True, **kwargs): - super(FFC_BN_ACT, self).__init__() - self.ffc = FFC(in_channels, out_channels, kernel_size, - ratio_gin, ratio_gout, stride, padding, dilation, - groups, bias, enable_lfu, padding_type=padding_type, **kwargs) - lnorm = nn.Identity if ratio_gout == 1 else norm_layer - gnorm = nn.Identity if ratio_gout == 0 else norm_layer - global_channels = int(out_channels * ratio_gout) - self.bn_l = lnorm(out_channels - global_channels) - self.bn_g = gnorm(global_channels) - - lact = nn.Identity if ratio_gout == 1 else activation_layer - gact = nn.Identity if ratio_gout == 0 else activation_layer - self.act_l = lact(inplace=True) - self.act_g = gact(inplace=True) - - def forward(self, x): - x_l, x_g = self.ffc(x) - x_l = self.act_l(self.bn_l(x_l)) - x_g = self.act_g(self.bn_g(x_g)) - return x_l, x_g - - -class FFCResnetBlock(nn.Module): - def __init__(self, dim, padding_type, norm_layer, activation_layer=nn.ReLU, dilation=1, - spatial_transform_kwargs=None, inline=False, **conv_kwargs): - super().__init__() - self.conv1 = FFC_BN_ACT(dim, dim, kernel_size=3, padding=dilation, dilation=dilation, - norm_layer=norm_layer, - activation_layer=activation_layer, - padding_type=padding_type, - **conv_kwargs) - self.conv2 = FFC_BN_ACT(dim, dim, kernel_size=3, padding=dilation, dilation=dilation, - norm_layer=norm_layer, - activation_layer=activation_layer, - padding_type=padding_type, - 
**conv_kwargs) - if spatial_transform_kwargs is not None: - self.conv1 = LearnableSpatialTransformWrapper(self.conv1, **spatial_transform_kwargs) - self.conv2 = LearnableSpatialTransformWrapper(self.conv2, **spatial_transform_kwargs) - self.inline = inline - - def forward(self, x): - if self.inline: - x_l, x_g = x[:, :-self.conv1.ffc.global_in_num], x[:, -self.conv1.ffc.global_in_num:] - else: - x_l, x_g = x if type(x) is tuple else (x, 0) - - id_l, id_g = x_l, x_g - - x_l, x_g = self.conv1((x_l, x_g)) - x_l, x_g = self.conv2((x_l, x_g)) - - x_l, x_g = id_l + x_l, id_g + x_g - out = x_l, x_g - if self.inline: - out = torch.cat(out, dim=1) - return out - - -class ConcatTupleLayer(nn.Module): - def forward(self, x): - assert isinstance(x, tuple) - x_l, x_g = x - assert torch.is_tensor(x_l) or torch.is_tensor(x_g) - if not torch.is_tensor(x_g): - return x_l - return torch.cat(x, dim=1) - - -class FFCResNetGenerator(nn.Module): - def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d, - padding_type='reflect', activation_layer=nn.ReLU, - up_norm_layer=nn.BatchNorm2d, up_activation=nn.ReLU(True), - init_conv_kwargs={}, downsample_conv_kwargs={}, resnet_conv_kwargs={}, - spatial_transform_layers=None, spatial_transform_kwargs={}, - add_out_act=True, max_features=1024, out_ffc=False, out_ffc_kwargs={}): - assert (n_blocks >= 0) - super().__init__() - - model = [nn.ReflectionPad2d(3), - FFC_BN_ACT(input_nc, ngf, kernel_size=7, padding=0, norm_layer=norm_layer, - activation_layer=activation_layer, **init_conv_kwargs)] - - ### downsample - for i in range(n_downsampling): - mult = 2 ** i - if i == n_downsampling - 1: - cur_conv_kwargs = dict(downsample_conv_kwargs) - cur_conv_kwargs['ratio_gout'] = resnet_conv_kwargs.get('ratio_gin', 0) - else: - cur_conv_kwargs = downsample_conv_kwargs - model += [FFC_BN_ACT(min(max_features, ngf * mult), - min(max_features, ngf * mult * 2), - kernel_size=3, stride=2, padding=1, - norm_layer=norm_layer, - activation_layer=activation_layer, - **cur_conv_kwargs)] - - mult = 2 ** n_downsampling - feats_num_bottleneck = min(max_features, ngf * mult) - - ### resnet blocks - for i in range(n_blocks): - cur_resblock = FFCResnetBlock(feats_num_bottleneck, padding_type=padding_type, activation_layer=activation_layer, - norm_layer=norm_layer, **resnet_conv_kwargs) - if spatial_transform_layers is not None and i in spatial_transform_layers: - cur_resblock = LearnableSpatialTransformWrapper(cur_resblock, **spatial_transform_kwargs) - model += [cur_resblock] - - model += [ConcatTupleLayer()] - - ### upsample - for i in range(n_downsampling): - mult = 2 ** (n_downsampling - i) - model += [nn.ConvTranspose2d(min(max_features, ngf * mult), - min(max_features, int(ngf * mult / 2)), - kernel_size=3, stride=2, padding=1, output_padding=1), - up_norm_layer(min(max_features, int(ngf * mult / 2))), - up_activation] - - if out_ffc: - model += [FFCResnetBlock(ngf, padding_type=padding_type, activation_layer=activation_layer, - norm_layer=norm_layer, inline=True, **out_ffc_kwargs)] - - model += [nn.ReflectionPad2d(3), - nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)] - if add_out_act: - model.append(get_activation('tanh' if add_out_act is True else add_out_act)) - self.model = nn.Sequential(*model) - - def forward(self, input): - return self.model(input) - - -class FFCNLayerDiscriminator(BaseDiscriminator): - def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, max_features=512, - init_conv_kwargs={}, 
conv_kwargs={}): - super().__init__() - self.n_layers = n_layers - - def _act_ctor(inplace=True): - return nn.LeakyReLU(negative_slope=0.2, inplace=inplace) - - kw = 3 - padw = int(np.ceil((kw-1.0)/2)) - sequence = [[FFC_BN_ACT(input_nc, ndf, kernel_size=kw, padding=padw, norm_layer=norm_layer, - activation_layer=_act_ctor, **init_conv_kwargs)]] - - nf = ndf - for n in range(1, n_layers): - nf_prev = nf - nf = min(nf * 2, max_features) - - cur_model = [ - FFC_BN_ACT(nf_prev, nf, - kernel_size=kw, stride=2, padding=padw, - norm_layer=norm_layer, - activation_layer=_act_ctor, - **conv_kwargs) - ] - sequence.append(cur_model) - - nf_prev = nf - nf = min(nf * 2, 512) - - cur_model = [ - FFC_BN_ACT(nf_prev, nf, - kernel_size=kw, stride=1, padding=padw, - norm_layer=norm_layer, - activation_layer=lambda *args, **kwargs: nn.LeakyReLU(*args, negative_slope=0.2, **kwargs), - **conv_kwargs), - ConcatTupleLayer() - ] - sequence.append(cur_model) - - sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]] - - for n in range(len(sequence)): - setattr(self, 'model'+str(n), nn.Sequential(*sequence[n])) - - def get_all_activations(self, x): - res = [x] - for n in range(self.n_layers + 2): - model = getattr(self, 'model' + str(n)) - res.append(model(res[-1])) - return res[1:] - - def forward(self, x): - act = self.get_all_activations(x) - feats = [] - for out in act[:-1]: - if isinstance(out, tuple): - if torch.is_tensor(out[1]): - out = torch.cat(out, dim=1) - else: - out = out[0] - feats.append(out) - return act[-1], feats diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/models/decode_heads/cc_head.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/models/decode_heads/cc_head.py deleted file mode 100644 index 5b9abb4e747f92657f4220b29788539340986c00..0000000000000000000000000000000000000000 --- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/models/decode_heads/cc_head.py +++ /dev/null @@ -1,42 +0,0 @@ -import torch - -from ..builder import HEADS -from .fcn_head import FCNHead - -try: - from annotator.uniformer.mmcv.ops import CrissCrossAttention -except ModuleNotFoundError: - CrissCrossAttention = None - - -@HEADS.register_module() -class CCHead(FCNHead): - """CCNet: Criss-Cross Attention for Semantic Segmentation. - - This head is the implementation of `CCNet - <https://arxiv.org/abs/1811.11721>`_. - - Args: - recurrence (int): Number of recurrence of Criss Cross Attention - module. Default: 2. 
- """ - - def __init__(self, recurrence=2, **kwargs): - if CrissCrossAttention is None: - raise RuntimeError('Please install mmcv-full for ' - 'CrissCrossAttention ops') - super(CCHead, self).__init__(num_convs=2, **kwargs) - self.recurrence = recurrence - self.cca = CrissCrossAttention(self.channels) - - def forward(self, inputs): - """Forward function.""" - x = self._transform_inputs(inputs) - output = self.convs[0](x) - for _ in range(self.recurrence): - output = self.cca(output) - output = self.convs[1](output) - if self.concat_input: - output = self.conv_cat(torch.cat([x, output], dim=1)) - output = self.cls_seg(output) - return output diff --git a/spaces/PeepDaSlan9/AutoGPT/scripts/check_requirements.py b/spaces/PeepDaSlan9/AutoGPT/scripts/check_requirements.py deleted file mode 100644 index e4eab024a6280c0d54110c69b2e03de639325fa6..0000000000000000000000000000000000000000 --- a/spaces/PeepDaSlan9/AutoGPT/scripts/check_requirements.py +++ /dev/null @@ -1,32 +0,0 @@ -import sys - -import pkg_resources - - -def main(): - requirements_file = sys.argv[1] - with open(requirements_file, "r") as f: - required_packages = [ - line.strip().split("#")[0].strip() for line in f.readlines() - ] - - installed_packages = [package.key for package in pkg_resources.working_set] - - missing_packages = [] - for package in required_packages: - if not package: # Skip empty lines - continue - package_name = package.strip().split("==")[0] - if package_name.lower() not in installed_packages: - missing_packages.append(package_name) - - if missing_packages: - print("Missing packages:") - print(", ".join(missing_packages)) - sys.exit(1) - else: - print("All packages are installed.") - - -if __name__ == "__main__": - main() diff --git a/spaces/PeepDaSlan9/De-limiter/eval_delimit/score_calc_delimit.py b/spaces/PeepDaSlan9/De-limiter/eval_delimit/score_calc_delimit.py deleted file mode 100644 index cfc2cc66b2a9b2e927c74177ca6a1f0fc8aa5f50..0000000000000000000000000000000000000000 --- a/spaces/PeepDaSlan9/De-limiter/eval_delimit/score_calc_delimit.py +++ /dev/null @@ -1,145 +0,0 @@ -# Calculate SI-SDR, Multi-resolution spectrogram mse score of the pre-inferenced sources -import os -import argparse -import csv -import json -import glob - -import tqdm -import numpy as np -import librosa -import pyloudnorm as pyln -from asteroid.metrics import get_metrics - -from utils import str2bool - - -def multi_resolution_spectrogram_mse( - gt, est, n_fft=[2048, 1024, 512], n_hop=[512, 256, 128] -): - assert gt.shape == est.shape - assert len(n_fft) == len(n_hop) - - score = 0.0 - for i in range(len(n_fft)): - gt_spec = librosa.magphase( - librosa.stft(gt, n_fft=n_fft[i], hop_length=n_hop[i]) - )[0] - est_spec = librosa.magphase( - librosa.stft(est, n_fft=n_fft[i], hop_length=n_hop[i]) - )[0] - score = score + np.mean((gt_spec - est_spec) ** 2) - - return score - - -parser = argparse.ArgumentParser(description="model test.py") - -parser.add_argument( - "--target", - type=str, - default="all", - help="target source. 
all, vocals, drums, bass, other, 0.5_mixed", -) -parser.add_argument( - "--root", type=str, default="/path/to/musdb18hq_loudnorm" -) -parser.add_argument("--exp_name", type=str, default="convtasnet_6_s") -parser.add_argument( - "--output_directory", - type=str, - default="/path/to/results", -) -parser.add_argument("--loudnorm_lufs", type=float, default=-14.0) -parser.add_argument( - "--calc_mse", - type=str2bool, - default=True, - help="calculate multi-resolution spectrogram mse", -) - -parser.add_argument( - "--calc_results", - type=str2bool, - default=True, - help="Set this True when you want to calculate the results of the test set. Set this False when calculating musdb-hq vs musdb-XL. (top row in Table 1.)", -) - -args, _ = parser.parse_known_args() - -args.sample_rate = 44100 - -meter = pyln.Meter(args.sample_rate) - -if args.calc_results: - args.test_output_dir = f"{args.output_directory}/test/{args.exp_name}" -else: - args.test_output_dir = f"{args.output_directory}/{args.exp_name}" - -if args.target == "all" or args.target == "0.5_mixed": - test_tracks = glob.glob(f"{args.root}/*/mixture.wav") -else: - test_tracks = glob.glob(f"{args.root}/*/{args.target}.wav") -i = 0 - -dict_song_score = {} -list_si_sdr = [] -list_multi_mse = [] -for track in tqdm.tqdm(test_tracks): - if args.target == "all": # for standard de-limiter estimation - audio_name = os.path.basename(os.path.dirname(track)) - gt_source = librosa.load(track, sr=args.sample_rate, mono=False)[0] - - est_delimiter = librosa.load( - f"{args.test_output_dir}/{audio_name}/all.wav", - sr=args.sample_rate, - mono=False, - )[0] - - else: # for source-separated de-limiter estimation - audio_name = os.path.basename(os.path.dirname(track)) - gt_source = librosa.load(track, sr=args.sample_rate, mono=False)[0] - est_delimiter = librosa.load( - f"{args.test_output_dir}/{audio_name}/{args.target}.wav", - sr=args.sample_rate, - mono=False, - )[0] - - - metrics_dict = get_metrics( - gt_source + est_delimiter, - gt_source, - est_delimiter, - sample_rate=args.sample_rate, - metrics_list=["si_sdr"], - ) - - if args.calc_mse: - multi_resolution_spectrogram_mse_score = multi_resolution_spectrogram_mse( - gt_source, est_delimiter - ) - else: - multi_resolution_spectrogram_mse_score = None - - dict_song_score[audio_name] = { - "si_sdr": metrics_dict["si_sdr"], - "multi_mse": multi_resolution_spectrogram_mse_score, - } - list_si_sdr.append(metrics_dict["si_sdr"]) - list_multi_mse.append(multi_resolution_spectrogram_mse_score) - - i += 1 - -print(f"{args.exp_name} on {args.target}") -print(f"SI-SDR score: {sum(list_si_sdr) / len(list_si_sdr)}") -if args.calc_mse: - print(f"multi-mse score: {sum(list_multi_mse) / len(list_multi_mse)}") - -if args.target != "all": - # save dict_song_score to json file - with open(f"{args.test_output_dir}/score_{args.target}.json", "w") as f: - json.dump(dict_song_score, f, indent=4) -else: - # save dict_song_score to json file - with open(f"{args.test_output_dir}/score.json", "w") as f: - json.dump(dict_song_score, f, indent=4) diff --git a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/structures/segmentation_mask.py b/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/structures/segmentation_mask.py deleted file mode 100644 index 208d9648b2826e1b4a55403890f5b54251c08fed..0000000000000000000000000000000000000000 --- a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/structures/segmentation_mask.py +++ /dev/null @@ -1,214 +0,0 @@ -# Copyright (c) Facebook, Inc. 
and its affiliates. All Rights Reserved. -import torch - -import pycocotools.mask as mask_utils - -# transpose -FLIP_LEFT_RIGHT = 0 -FLIP_TOP_BOTTOM = 1 - - -class Mask(object): - """ - This class is unfinished and not meant for use yet - It is supposed to contain the mask for an object as - a 2d tensor - """ - - def __init__(self, masks, size, mode): - self.masks = masks - self.size = size - self.mode = mode - - def transpose(self, method): - if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM): - raise NotImplementedError( - "Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented" - ) - - width, height = self.size - if method == FLIP_LEFT_RIGHT: - dim = width - idx = 2 - elif method == FLIP_TOP_BOTTOM: - dim = height - idx = 1 - - flip_idx = list(range(dim)[::-1]) - flipped_masks = self.masks.index_select(dim, flip_idx) - return Mask(flipped_masks, self.size, self.mode) - - def crop(self, box): - w, h = box[2] - box[0], box[3] - box[1] - - cropped_masks = self.masks[:, box[1] : box[3], box[0] : box[2]] - return Mask(cropped_masks, size=(w, h), mode=self.mode) - - def resize(self, size, *args, **kwargs): - pass - - -class Polygons(object): - """ - This class holds a set of polygons that represents a single instance - of an object mask. The object can be represented as a set of - polygons - """ - - def __init__(self, polygons, size, mode): - # assert isinstance(polygons, list), '{}'.format(polygons) - if isinstance(polygons, list): - polygons = [torch.as_tensor(p, dtype=torch.float32) for p in polygons] - elif isinstance(polygons, Polygons): - polygons = polygons.polygons - - self.polygons = polygons - self.size = size - self.mode = mode - - def transpose(self, method): - if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM): - raise NotImplementedError( - "Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented" - ) - - flipped_polygons = [] - width, height = self.size - if method == FLIP_LEFT_RIGHT: - dim = width - idx = 0 - elif method == FLIP_TOP_BOTTOM: - dim = height - idx = 1 - - for poly in self.polygons: - p = poly.clone() - TO_REMOVE = 1 - p[idx::2] = dim - poly[idx::2] - TO_REMOVE - flipped_polygons.append(p) - - return Polygons(flipped_polygons, size=self.size, mode=self.mode) - - def crop(self, box): - w, h = box[2] - box[0], box[3] - box[1] - - # TODO chck if necessary - w = max(w, 1) - h = max(h, 1) - - cropped_polygons = [] - for poly in self.polygons: - p = poly.clone() - p[0::2] = p[0::2] - box[0] # .clamp(min=0, max=w) - p[1::2] = p[1::2] - box[1] # .clamp(min=0, max=h) - cropped_polygons.append(p) - - return Polygons(cropped_polygons, size=(w, h), mode=self.mode) - - def resize(self, size, *args, **kwargs): - ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(size, self.size)) - if ratios[0] == ratios[1]: - ratio = ratios[0] - scaled_polys = [p * ratio for p in self.polygons] - return Polygons(scaled_polys, size, mode=self.mode) - - ratio_w, ratio_h = ratios - scaled_polygons = [] - for poly in self.polygons: - p = poly.clone() - p[0::2] *= ratio_w - p[1::2] *= ratio_h - scaled_polygons.append(p) - - return Polygons(scaled_polygons, size=size, mode=self.mode) - - def convert(self, mode): - width, height = self.size - if mode == "mask": - rles = mask_utils.frPyObjects( - [p.detach().numpy() for p in self.polygons], height, width - ) - rle = mask_utils.merge(rles) - mask = mask_utils.decode(rle) - mask = torch.from_numpy(mask) - # TODO add squeeze? 
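# At this point `mask` is an HxW uint8 tensor: frPyObjects() encoded each
# polygon as COCO RLE, merge() fused them into a single RLE, and decode()
# rasterized the result into a binary mask.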
- return mask - - def __repr__(self): - s = self.__class__.__name__ + "(" - s += "num_polygons={}, ".format(len(self.polygons)) - s += "image_width={}, ".format(self.size[0]) - s += "image_height={}, ".format(self.size[1]) - s += "mode={})".format(self.mode) - return s - - -class SegmentationMask(object): - """ - This class stores the segmentations for all objects in the image - """ - - def __init__(self, polygons, size, mode=None): - """ - Arguments: - polygons: a list of list of lists of numbers. The first - level of the list correspond to individual instances, - the second level to all the polygons that compose the - object, and the third level to the polygon coordinates. - """ - assert isinstance(polygons, list) - - self.polygons = [Polygons(p, size, mode) for p in polygons] - self.size = size - self.mode = mode - - def transpose(self, method): - if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM): - raise NotImplementedError( - "Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented" - ) - - flipped = [] - for polygon in self.polygons: - flipped.append(polygon.transpose(method)) - return SegmentationMask(flipped, size=self.size, mode=self.mode) - - def crop(self, box): - w, h = box[2] - box[0], box[3] - box[1] - cropped = [] - for polygon in self.polygons: - cropped.append(polygon.crop(box)) - return SegmentationMask(cropped, size=(w, h), mode=self.mode) - - def resize(self, size, *args, **kwargs): - scaled = [] - for polygon in self.polygons: - scaled.append(polygon.resize(size, *args, **kwargs)) - return SegmentationMask(scaled, size=size, mode=self.mode) - - def to(self, *args, **kwargs): - return self - - def __getitem__(self, item): - if isinstance(item, (int, slice)): - selected_polygons = [self.polygons[item]] - else: - # advanced indexing on a single dimension - selected_polygons = [] - if isinstance(item, torch.Tensor) and item.dtype == torch.bool: - item = item.nonzero() - item = item.squeeze(1) if item.numel() > 0 else item - item = item.tolist() - for i in item: - selected_polygons.append(self.polygons[i]) - return SegmentationMask(selected_polygons, size=self.size, mode=self.mode) - - def __iter__(self): - return iter(self.polygons) - - def __repr__(self): - s = self.__class__.__name__ + "(" - s += "num_instances={}, ".format(len(self.polygons)) - s += "image_width={}, ".format(self.size[0]) - s += "image_height={})".format(self.size[1]) - return s diff --git a/spaces/Pranjal12345/Text_to_Speech/tortoise/eval.py b/spaces/Pranjal12345/Text_to_Speech/tortoise/eval.py deleted file mode 100644 index 312b162234a927e8e7bd89c26f782487ed67bc07..0000000000000000000000000000000000000000 --- a/spaces/Pranjal12345/Text_to_Speech/tortoise/eval.py +++ /dev/null @@ -1,27 +0,0 @@ -import argparse -import os - -import torchaudio - -from api import TextToSpeech -from tortoise.utils.audio import load_audio - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--eval_path', type=str, help='Path to TSV test file', default="D:\\tmp\\tortoise-tts-eval\\test.tsv") - parser.add_argument('--output_path', type=str, help='Where to put results', default="D:\\tmp\\tortoise-tts-eval\\baseline") - parser.add_argument('--preset', type=str, help='Rendering preset.', default="standard") - args = parser.parse_args() - os.makedirs(args.output_path, exist_ok=True) - - tts = TextToSpeech() - - with open(args.eval_path, 'r', encoding='utf-8') as f: - lines = f.readlines() - - for line in lines: - text, real = line.strip().split('\t') - conds = [load_audio(real, 22050)] - gen = 
tts.tts_with_preset(text, voice_samples=conds, conditioning_latents=None, preset=args.preset) - torchaudio.save(os.path.join(args.output_path, os.path.basename(real)), gen.squeeze(0).cpu(), 24000) - diff --git a/spaces/Purple11/Grounded-Diffusion/src/taming-transformers/taming/modules/losses/vqperceptual.py b/spaces/Purple11/Grounded-Diffusion/src/taming-transformers/taming/modules/losses/vqperceptual.py deleted file mode 100644 index c2febd445728479d4cd9aacdb2572cb1f1af04db..0000000000000000000000000000000000000000 --- a/spaces/Purple11/Grounded-Diffusion/src/taming-transformers/taming/modules/losses/vqperceptual.py +++ /dev/null @@ -1,136 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - -from taming.modules.losses.lpips import LPIPS -from taming.modules.discriminator.model import NLayerDiscriminator, weights_init - - -class DummyLoss(nn.Module): - def __init__(self): - super().__init__() - - -def adopt_weight(weight, global_step, threshold=0, value=0.): - if global_step < threshold: - weight = value - return weight - - -def hinge_d_loss(logits_real, logits_fake): - loss_real = torch.mean(F.relu(1. - logits_real)) - loss_fake = torch.mean(F.relu(1. + logits_fake)) - d_loss = 0.5 * (loss_real + loss_fake) - return d_loss - - -def vanilla_d_loss(logits_real, logits_fake): - d_loss = 0.5 * ( - torch.mean(torch.nn.functional.softplus(-logits_real)) + - torch.mean(torch.nn.functional.softplus(logits_fake))) - return d_loss - - -class VQLPIPSWithDiscriminator(nn.Module): - def __init__(self, disc_start, codebook_weight=1.0, pixelloss_weight=1.0, - disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0, - perceptual_weight=1.0, use_actnorm=False, disc_conditional=False, - disc_ndf=64, disc_loss="hinge"): - super().__init__() - assert disc_loss in ["hinge", "vanilla"] - self.codebook_weight = codebook_weight - self.pixel_weight = pixelloss_weight - self.perceptual_loss = LPIPS().eval() - self.perceptual_weight = perceptual_weight - - self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels, - n_layers=disc_num_layers, - use_actnorm=use_actnorm, - ndf=disc_ndf - ).apply(weights_init) - self.discriminator_iter_start = disc_start - if disc_loss == "hinge": - self.disc_loss = hinge_d_loss - elif disc_loss == "vanilla": - self.disc_loss = vanilla_d_loss - else: - raise ValueError(f"Unknown GAN loss '{disc_loss}'.") - print(f"VQLPIPSWithDiscriminator running with {disc_loss} loss.") - self.disc_factor = disc_factor - self.discriminator_weight = disc_weight - self.disc_conditional = disc_conditional - - def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None): - if last_layer is not None: - nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0] - g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0] - else: - nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0] - g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0] - - d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4) - d_weight = torch.clamp(d_weight, 0.0, 1e4).detach() - d_weight = d_weight * self.discriminator_weight - return d_weight - - def forward(self, codebook_loss, inputs, reconstructions, optimizer_idx, - global_step, last_layer=None, cond=None, split="train"): - rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous()) - if self.perceptual_weight > 0: - p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous()) - rec_loss = 
rec_loss + self.perceptual_weight * p_loss - else: - p_loss = torch.tensor([0.0]) - - nll_loss = rec_loss - #nll_loss = torch.sum(nll_loss) / nll_loss.shape[0] - nll_loss = torch.mean(nll_loss) - - # now the GAN part - if optimizer_idx == 0: - # generator update - if cond is None: - assert not self.disc_conditional - logits_fake = self.discriminator(reconstructions.contiguous()) - else: - assert self.disc_conditional - logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1)) - g_loss = -torch.mean(logits_fake) - - try: - d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer) - except RuntimeError: - assert not self.training - d_weight = torch.tensor(0.0) - - disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) - loss = nll_loss + d_weight * disc_factor * g_loss + self.codebook_weight * codebook_loss.mean() - - log = {"{}/total_loss".format(split): loss.clone().detach().mean(), - "{}/quant_loss".format(split): codebook_loss.detach().mean(), - "{}/nll_loss".format(split): nll_loss.detach().mean(), - "{}/rec_loss".format(split): rec_loss.detach().mean(), - "{}/p_loss".format(split): p_loss.detach().mean(), - "{}/d_weight".format(split): d_weight.detach(), - "{}/disc_factor".format(split): torch.tensor(disc_factor), - "{}/g_loss".format(split): g_loss.detach().mean(), - } - return loss, log - - if optimizer_idx == 1: - # second pass for discriminator update - if cond is None: - logits_real = self.discriminator(inputs.contiguous().detach()) - logits_fake = self.discriminator(reconstructions.contiguous().detach()) - else: - logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1)) - logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1)) - - disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) - d_loss = disc_factor * self.disc_loss(logits_real, logits_fake) - - log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(), - "{}/logits_real".format(split): logits_real.detach().mean(), - "{}/logits_fake".format(split): logits_fake.detach().mean() - } - return d_loss, log diff --git a/spaces/Qiwei97/Pubmed_Analyzer/questiongenerator.py b/spaces/Qiwei97/Pubmed_Analyzer/questiongenerator.py deleted file mode 100644 index 1e4ad9051134b3ffde8084936722723f24407486..0000000000000000000000000000000000000000 --- a/spaces/Qiwei97/Pubmed_Analyzer/questiongenerator.py +++ /dev/null @@ -1,356 +0,0 @@ -# Adapted from https://github.com/AMontgomerie/question_generator - -import os -import sys -import math -import numpy as np -import torch -import spacy -import re -import random -import json -import en_core_web_sm -from transformers import ( - AutoTokenizer, - AutoModelForSeq2SeqLM, - AutoModelForSequenceClassification, -) - - -class QuestionGenerator: - def __init__(self, model_dir=None): - - QG_PRETRAINED = "iarfmoose/t5-base-question-generator" - self.ANSWER_TOKEN = "<answer>" - self.CONTEXT_TOKEN = "<context>" - self.SEQ_LENGTH = 512 - - self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - - self.qg_tokenizer = AutoTokenizer.from_pretrained(QG_PRETRAINED, use_fast=False) - self.qg_model = AutoModelForSeq2SeqLM.from_pretrained(QG_PRETRAINED) - self.qg_model.to(self.device) - - self.qa_evaluator = QAEvaluator(model_dir) - - def generate( - self, article, use_evaluator=True, num_questions=None, answer_style="all" - ): - - print("Generating questions...\n") - - 
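# Overall flow: build "<answer> ... <context> ..." prompts from the article,
# run the T5 question generator over each prompt, then (optionally) score the
# question/answer pairs with the BERT-based QA evaluator and keep only the
# highest-ranked ones.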
qg_inputs, qg_answers = self.generate_qg_inputs(article, answer_style) - generated_questions = self.generate_questions_from_inputs(qg_inputs) - - message = "{} questions doesn't match {} answers".format( - len(generated_questions), len(qg_answers) - ) - assert len(generated_questions) == len(qg_answers), message - - if use_evaluator: - - print("Evaluating QA pairs...\n") - - encoded_qa_pairs = self.qa_evaluator.encode_qa_pairs( - generated_questions, qg_answers - ) - scores = self.qa_evaluator.get_scores(encoded_qa_pairs) - if num_questions: - qa_list = self._get_ranked_qa_pairs( - generated_questions, qg_answers, scores, num_questions - ) - else: - qa_list = self._get_ranked_qa_pairs( - generated_questions, qg_answers, scores - ) - - else: - print("Skipping evaluation step.\n") - qa_list = self._get_all_qa_pairs(generated_questions, qg_answers) - - return qa_list - - def generate_qg_inputs(self, text, answer_style): - - VALID_ANSWER_STYLES = ["all", "sentences", "multiple_choice"] - - if answer_style not in VALID_ANSWER_STYLES: - raise ValueError( - "Invalid answer style {}. Please choose from {}".format( - answer_style, VALID_ANSWER_STYLES - ) - ) - - inputs = [] - answers = [] - - if answer_style == "sentences" or answer_style == "all": - segments = self._split_into_segments(text) - for segment in segments: - sentences = self._split_text(segment) - prepped_inputs, prepped_answers = self._prepare_qg_inputs( - sentences, segment - ) - inputs.extend(prepped_inputs) - answers.extend(prepped_answers) - - if answer_style == "multiple_choice" or answer_style == "all": - sentences = self._split_text(text) - prepped_inputs, prepped_answers = self._prepare_qg_inputs_MC(sentences) - inputs.extend(prepped_inputs) - answers.extend(prepped_answers) - - return inputs, answers - - def generate_questions_from_inputs(self, qg_inputs): - generated_questions = [] - - for qg_input in qg_inputs: - question = self._generate_question(qg_input) - generated_questions.append(question) - - return generated_questions - - def _split_text(self, text): - MAX_SENTENCE_LEN = 128 - - sentences = re.findall(".*?[.!\?]", text) - - cut_sentences = [] - for sentence in sentences: - if len(sentence) > MAX_SENTENCE_LEN: - cut_sentences.extend(re.split("[,;:)]", sentence)) - # temporary solution to remove useless post-quote sentence fragments - cut_sentences = [s for s in sentences if len(s.split(" ")) > 5] - sentences = sentences + cut_sentences - - return list(set([s.strip(" ") for s in sentences])) - - def _split_into_segments(self, text): - MAX_TOKENS = 490 - - paragraphs = text.split("\n") - tokenized_paragraphs = [ - self.qg_tokenizer(p)["input_ids"] for p in paragraphs if len(p) > 0 - ] - - segments = [] - while len(tokenized_paragraphs) > 0: - segment = [] - while len(segment) < MAX_TOKENS and len(tokenized_paragraphs) > 0: - paragraph = tokenized_paragraphs.pop(0) - segment.extend(paragraph) - segments.append(segment) - return [self.qg_tokenizer.decode(s) for s in segments] - - def _prepare_qg_inputs(self, sentences, text): - inputs = [] - answers = [] - - for sentence in sentences: - qg_input = "{} {} {} {}".format( - self.ANSWER_TOKEN, sentence, self.CONTEXT_TOKEN, text - ) - inputs.append(qg_input) - answers.append(sentence) - - return inputs, answers - - def _prepare_qg_inputs_MC(self, sentences): - - spacy_nlp = en_core_web_sm.load() - docs = list(spacy_nlp.pipe(sentences, disable=["parser"])) - inputs_from_text = [] - answers_from_text = [] - - for i in range(len(sentences)): - entities = docs[i].ents - if 
entities: - for entity in entities: - qg_input = "{} {} {} {}".format( - self.ANSWER_TOKEN, entity, self.CONTEXT_TOKEN, sentences[i] - ) - answers = self._get_MC_answers(entity, docs) - inputs_from_text.append(qg_input) - answers_from_text.append(answers) - - return inputs_from_text, answers_from_text - - def _get_MC_answers(self, correct_answer, docs): - - entities = [] - for doc in docs: - entities.extend([{"text": e.text, "label_": e.label_} for e in doc.ents]) - - # remove duplicate elements - entities_json = [json.dumps(kv) for kv in entities] - pool = set(entities_json) - num_choices = ( - min(4, len(pool)) - 1 - ) # -1 because we already have the correct answer - - # add the correct answer - final_choices = [] - correct_label = correct_answer.label_ - final_choices.append({"answer": correct_answer.text, "correct": True}) - pool.remove( - json.dumps({"text": correct_answer.text, "label_": correct_answer.label_}) - ) - - # find answers with the same NER label - matches = [e for e in pool if correct_label in e] - - # if we don't have enough then add some other random answers - if len(matches) < num_choices: - choices = matches - pool = pool.difference(set(choices)) - choices.extend(random.sample(pool, num_choices - len(choices))) - else: - choices = random.sample(matches, num_choices) - - choices = [json.loads(s) for s in choices] - for choice in choices: - final_choices.append({"answer": choice["text"], "correct": False}) - random.shuffle(final_choices) - return final_choices - - def _generate_question(self, qg_input): - self.qg_model.eval() - encoded_input = self._encode_qg_input(qg_input) - with torch.no_grad(): - output = self.qg_model.generate(input_ids=encoded_input["input_ids"]) - question = self.qg_tokenizer.decode(output[0], skip_special_tokens=True) - return question - - def _encode_qg_input(self, qg_input): - return self.qg_tokenizer( - qg_input, - padding='max_length', - max_length=self.SEQ_LENGTH, - truncation=True, - return_tensors="pt", - ).to(self.device) - - def _get_ranked_qa_pairs( - self, generated_questions, qg_answers, scores, num_questions=10 - ): - if num_questions > len(scores): - num_questions = len(scores) - print( - "\nWas only able to generate {} questions. 
For more questions, please input a longer text.".format( - num_questions - ) - ) - - qa_list = [] - for i in range(num_questions): - index = scores[i] - qa = self._make_dict( - generated_questions[index].split("?")[0] + "?", qg_answers[index] - ) - qa_list.append(qa) - return qa_list - - def _get_all_qa_pairs(self, generated_questions, qg_answers): - qa_list = [] - for i in range(len(generated_questions)): - qa = self._make_dict( - generated_questions[i].split("?")[0] + "?", qg_answers[i] - ) - qa_list.append(qa) - return qa_list - - def _make_dict(self, question, answer): - qa = {} - qa["question"] = question - qa["answer"] = answer - return qa - - -class QAEvaluator: - def __init__(self, model_dir=None): - - QAE_PRETRAINED = "iarfmoose/bert-base-cased-qa-evaluator" - self.SEQ_LENGTH = 512 - - self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - - self.qae_tokenizer = AutoTokenizer.from_pretrained(QAE_PRETRAINED) - self.qae_model = AutoModelForSequenceClassification.from_pretrained( - QAE_PRETRAINED - ) - self.qae_model.to(self.device) - - def encode_qa_pairs(self, questions, answers): - encoded_pairs = [] - for i in range(len(questions)): - encoded_qa = self._encode_qa(questions[i], answers[i]) - encoded_pairs.append(encoded_qa.to(self.device)) - return encoded_pairs - - def get_scores(self, encoded_qa_pairs): - scores = {} - self.qae_model.eval() - with torch.no_grad(): - for i in range(len(encoded_qa_pairs)): - scores[i] = self._evaluate_qa(encoded_qa_pairs[i]) - - return [ - k for k, v in sorted(scores.items(), key=lambda item: item[1], reverse=True) - ] - - def _encode_qa(self, question, answer): - if type(answer) is list: - for a in answer: - if a["correct"]: - correct_answer = a["answer"] - else: - correct_answer = answer - return self.qae_tokenizer( - text=question, - text_pair=correct_answer, - padding="max_length", - max_length=self.SEQ_LENGTH, - truncation=True, - return_tensors="pt", - ) - - def _evaluate_qa(self, encoded_qa_pair): - output = self.qae_model(**encoded_qa_pair) - return output[0][0][1] - - -def print_qa(qa_list, show_answers=True): - for i in range(len(qa_list)): - space = " " * int(np.where(i < 9, 3, 4)) # wider space for 2 digit q nums - - print("{}) Q: {}".format(i + 1, qa_list[i]["question"])) - - answer = qa_list[i]["answer"] - - # print a list of multiple choice answers - if type(answer) is list: - - if show_answers: - print( - "{}A: 1.".format(space), - answer[0]["answer"], - np.where(answer[0]["correct"], "(correct)", ""), - ) - for j in range(1, len(answer)): - print( - "{}{}.".format(space + " ", j + 1), - answer[j]["answer"], - np.where(answer[j]["correct"] == True, "(correct)", ""), - ) - - else: - print("{}A: 1.".format(space), answer[0]["answer"]) - for j in range(1, len(answer)): - print("{}{}.".format(space + " ", j + 1), answer[j]["answer"]) - print("") - - # print full sentence answers - else: - if show_answers: - print("{}A:".format(space), answer, "\n") \ No newline at end of file diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_vendor/packaging/utils.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_vendor/packaging/utils.py deleted file mode 100644 index bab11b80c60f10a4f3bccb12eb5b17c48a449767..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_vendor/packaging/utils.py +++ /dev/null @@ -1,136 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and 
the BSD License. See the LICENSE file in the root of this repository -# for complete details. - -import re -from typing import FrozenSet, NewType, Tuple, Union, cast - -from .tags import Tag, parse_tag -from .version import InvalidVersion, Version - -BuildTag = Union[Tuple[()], Tuple[int, str]] -NormalizedName = NewType("NormalizedName", str) - - -class InvalidWheelFilename(ValueError): - """ - An invalid wheel filename was found, users should refer to PEP 427. - """ - - -class InvalidSdistFilename(ValueError): - """ - An invalid sdist filename was found, users should refer to the packaging user guide. - """ - - -_canonicalize_regex = re.compile(r"[-_.]+") -# PEP 427: The build number must start with a digit. -_build_tag_regex = re.compile(r"(\d+)(.*)") - - -def canonicalize_name(name: str) -> NormalizedName: - # This is taken from PEP 503. - value = _canonicalize_regex.sub("-", name).lower() - return cast(NormalizedName, value) - - -def canonicalize_version(version: Union[Version, str]) -> str: - """ - This is very similar to Version.__str__, but has one subtle difference - with the way it handles the release segment. - """ - if isinstance(version, str): - try: - parsed = Version(version) - except InvalidVersion: - # Legacy versions cannot be normalized - return version - else: - parsed = version - - parts = [] - - # Epoch - if parsed.epoch != 0: - parts.append(f"{parsed.epoch}!") - - # Release segment - # NB: This strips trailing '.0's to normalize - parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in parsed.release))) - - # Pre-release - if parsed.pre is not None: - parts.append("".join(str(x) for x in parsed.pre)) - - # Post-release - if parsed.post is not None: - parts.append(f".post{parsed.post}") - - # Development release - if parsed.dev is not None: - parts.append(f".dev{parsed.dev}") - - # Local version segment - if parsed.local is not None: - parts.append(f"+{parsed.local}") - - return "".join(parts) - - -def parse_wheel_filename( - filename: str, -) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]]: - if not filename.endswith(".whl"): - raise InvalidWheelFilename( - f"Invalid wheel filename (extension must be '.whl'): {filename}" - ) - - filename = filename[:-4] - dashes = filename.count("-") - if dashes not in (4, 5): - raise InvalidWheelFilename( - f"Invalid wheel filename (wrong number of parts): {filename}" - ) - - parts = filename.split("-", dashes - 2) - name_part = parts[0] - # See PEP 427 for the rules on escaping the project name - if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None: - raise InvalidWheelFilename(f"Invalid project name: {filename}") - name = canonicalize_name(name_part) - version = Version(parts[1]) - if dashes == 5: - build_part = parts[2] - build_match = _build_tag_regex.match(build_part) - if build_match is None: - raise InvalidWheelFilename( - f"Invalid build number: {build_part} in '{filename}'" - ) - build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2))) - else: - build = () - tags = parse_tag(parts[-1]) - return (name, version, build, tags) - - -def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]: - if filename.endswith(".tar.gz"): - file_stem = filename[: -len(".tar.gz")] - elif filename.endswith(".zip"): - file_stem = filename[: -len(".zip")] - else: - raise InvalidSdistFilename( - f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):" - f" {filename}" - ) - - # We are requiring a PEP 440 version, which cannot contain dashes, - # so we split on 
the last dash. - name_part, sep, version_part = file_stem.rpartition("-") - if not sep: - raise InvalidSdistFilename(f"Invalid sdist filename: {filename}") - - name = canonicalize_name(name_part) - version = Version(version_part) - return (name, version) diff --git a/spaces/Realcat/image-matching-webui/third_party/DKM/dkm/benchmarks/megadepth1500_benchmark.py b/spaces/Realcat/image-matching-webui/third_party/DKM/dkm/benchmarks/megadepth1500_benchmark.py deleted file mode 100644 index d9499f1e92fd4df3ad6fe59c37b6c881d5322a51..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/DKM/dkm/benchmarks/megadepth1500_benchmark.py +++ /dev/null @@ -1,123 +0,0 @@ -import numpy as np -import torch -from dkm.utils import * -from PIL import Image -from tqdm import tqdm -import torch.nn.functional as F - - -class Megadepth1500Benchmark: - def __init__(self, data_root="data/megadepth", scene_names=None) -> None: - if scene_names is None: - self.scene_names = [ - "0015_0.1_0.3.npz", - "0015_0.3_0.5.npz", - "0022_0.1_0.3.npz", - "0022_0.3_0.5.npz", - "0022_0.5_0.7.npz", - ] - else: - self.scene_names = scene_names - self.scenes = [ - np.load(f"{data_root}/{scene}", allow_pickle=True) - for scene in self.scene_names - ] - self.data_root = data_root - - def benchmark(self, model): - with torch.no_grad(): - data_root = self.data_root - tot_e_t, tot_e_R, tot_e_pose = [], [], [] - for scene_ind in range(len(self.scenes)): - scene = self.scenes[scene_ind] - pairs = scene["pair_infos"] - intrinsics = scene["intrinsics"] - poses = scene["poses"] - im_paths = scene["image_paths"] - pair_inds = range(len(pairs)) - for pairind in tqdm(pair_inds): - idx1, idx2 = pairs[pairind][0] - K1 = intrinsics[idx1].copy() - T1 = poses[idx1].copy() - R1, t1 = T1[:3, :3], T1[:3, 3] - K2 = intrinsics[idx2].copy() - T2 = poses[idx2].copy() - R2, t2 = T2[:3, :3], T2[:3, 3] - R, t = compute_relative_pose(R1, t1, R2, t2) - im1_path = f"{data_root}/{im_paths[idx1]}" - im2_path = f"{data_root}/{im_paths[idx2]}" - im1 = Image.open(im1_path) - w1, h1 = im1.size - im2 = Image.open(im2_path) - w2, h2 = im2.size - scale1 = 1200 / max(w1, h1) - scale2 = 1200 / max(w2, h2) - w1, h1 = scale1 * w1, scale1 * h1 - w2, h2 = scale2 * w2, scale2 * h2 - K1[:2] = K1[:2] * scale1 - K2[:2] = K2[:2] * scale2 - dense_matches, dense_certainty = model.match(im1_path, im2_path) - sparse_matches, _ = model.sample( - dense_matches, dense_certainty, 5000 - ) - kpts1 = sparse_matches[:, :2] - kpts1 = torch.stack( - ( - w1 * (kpts1[:, 0] + 1) / 2, - h1 * (kpts1[:, 1] + 1) / 2, - ), - axis=-1, - ) - kpts2 = sparse_matches[:, 2:] - kpts2 = torch.stack( - ( - w2 * (kpts2[:, 0] + 1) / 2, - h2 * (kpts2[:, 1] + 1) / 2, - ), - axis=-1, - ) - for _ in range(5): - shuffling = np.random.permutation(np.arange(len(kpts1))) - kpts1 = kpts1[shuffling] - kpts2 = kpts2[shuffling] - try: - norm_threshold = 0.5 / ( - np.mean(np.abs(K1[:2, :2])) - + np.mean(np.abs(K2[:2, :2])) - ) - R_est, t_est, mask = estimate_pose( - kpts1.cpu().numpy(), - kpts2.cpu().numpy(), - K1, - K2, - norm_threshold, - conf=0.99999, - ) - T1_to_2_est = np.concatenate((R_est, t_est), axis=-1) # - e_t, e_R = compute_pose_error(T1_to_2_est, R, t) - e_pose = max(e_t, e_R) - except Exception as e: - print(repr(e)) - e_t, e_R = 90, 90 - e_pose = max(e_t, e_R) - tot_e_t.append(e_t) - tot_e_R.append(e_R) - tot_e_pose.append(e_pose) - tot_e_pose = np.array(tot_e_pose) - thresholds = [5, 10, 20] - auc = pose_auc(tot_e_pose, thresholds) - acc_5 = (tot_e_pose < 5).mean() - acc_10 
= (tot_e_pose < 10).mean() - acc_15 = (tot_e_pose < 15).mean() - acc_20 = (tot_e_pose < 20).mean() - map_5 = acc_5 - map_10 = np.mean([acc_5, acc_10]) - map_20 = np.mean([acc_5, acc_10, acc_15, acc_20]) - return { - "auc_5": auc[0], - "auc_10": auc[1], - "auc_20": auc[2], - "map_5": map_5, - "map_10": map_10, - "map_20": map_20, - } diff --git a/spaces/Realcat/image-matching-webui/third_party/d2net/megadepth_utils/preprocess_scene.py b/spaces/Realcat/image-matching-webui/third_party/d2net/megadepth_utils/preprocess_scene.py deleted file mode 100644 index 5364058829b7e45eabd61a32a591711645fc1ded..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/d2net/megadepth_utils/preprocess_scene.py +++ /dev/null @@ -1,215 +0,0 @@ -import argparse - -import imagesize - -import numpy as np - -import os - -parser = argparse.ArgumentParser(description="MegaDepth preprocessing script") - -parser.add_argument("--base_path", type=str, required=True, help="path to MegaDepth") -parser.add_argument("--scene_id", type=str, required=True, help="scene ID") - -parser.add_argument( - "--output_path", type=str, required=True, help="path to the output directory" -) - -args = parser.parse_args() - -base_path = args.base_path -# Remove the trailing / if need be. -if base_path[-1] in ["/", "\\"]: - base_path = base_path[:-1] -scene_id = args.scene_id - -base_depth_path = os.path.join(base_path, "phoenix/S6/zl548/MegaDepth_v1") -base_undistorted_sfm_path = os.path.join(base_path, "Undistorted_SfM") - -undistorted_sparse_path = os.path.join( - base_undistorted_sfm_path, scene_id, "sparse-txt" -) -if not os.path.exists(undistorted_sparse_path): - exit() - -depths_path = os.path.join(base_depth_path, scene_id, "dense0", "depths") -if not os.path.exists(depths_path): - exit() - -images_path = os.path.join(base_undistorted_sfm_path, scene_id, "images") -if not os.path.exists(images_path): - exit() - -# Process cameras.txt -with open(os.path.join(undistorted_sparse_path, "cameras.txt"), "r") as f: - raw = f.readlines()[3:] # skip the header - -camera_intrinsics = {} -for camera in raw: - camera = camera.split(" ") - camera_intrinsics[int(camera[0])] = [float(elem) for elem in camera[2:]] - -# Process points3D.txt -with open(os.path.join(undistorted_sparse_path, "points3D.txt"), "r") as f: - raw = f.readlines()[3:] # skip the header - -points3D = {} -for point3D in raw: - point3D = point3D.split(" ") - points3D[int(point3D[0])] = np.array( - [float(point3D[1]), float(point3D[2]), float(point3D[3])] - ) - -# Process images.txt -with open(os.path.join(undistorted_sparse_path, "images.txt"), "r") as f: - raw = f.readlines()[4:] # skip the header - -image_id_to_idx = {} -image_names = [] -raw_pose = [] -camera = [] -points3D_id_to_2D = [] -n_points3D = [] -for idx, (image, points) in enumerate(zip(raw[::2], raw[1::2])): - image = image.split(" ") - points = points.split(" ") - - image_id_to_idx[int(image[0])] = idx - - image_name = image[-1].strip("\n") - image_names.append(image_name) - - raw_pose.append([float(elem) for elem in image[1:-2]]) - camera.append(int(image[-2])) - current_points3D_id_to_2D = {} - for x, y, point3D_id in zip(points[::3], points[1::3], points[2::3]): - if int(point3D_id) == -1: - continue - current_points3D_id_to_2D[int(point3D_id)] = [float(x), float(y)] - points3D_id_to_2D.append(current_points3D_id_to_2D) - n_points3D.append(len(current_points3D_id_to_2D)) -n_images = len(image_names) - -# Image and depthmaps paths -image_paths = [] -depth_paths = [] -for 
image_name in image_names: - image_path = os.path.join(images_path, image_name) - - # Path to the depth file - depth_path = os.path.join(depths_path, "%s.h5" % os.path.splitext(image_name)[0]) - - if os.path.exists(depth_path): - # Check if depth map or background / foreground mask - file_size = os.stat(depth_path).st_size - # Rough estimate - 75KB might work as well - if file_size < 100 * 1024: - depth_paths.append(None) - image_paths.append(None) - else: - depth_paths.append(depth_path[len(base_path) + 1 :]) - image_paths.append(image_path[len(base_path) + 1 :]) - else: - depth_paths.append(None) - image_paths.append(None) - -# Camera configuration -intrinsics = [] -poses = [] -principal_axis = [] -points3D_id_to_ndepth = [] -for idx, image_name in enumerate(image_names): - if image_paths[idx] is None: - intrinsics.append(None) - poses.append(None) - principal_axis.append([0, 0, 0]) - points3D_id_to_ndepth.append({}) - continue - image_intrinsics = camera_intrinsics[camera[idx]] - K = np.zeros([3, 3]) - K[0, 0] = image_intrinsics[2] - K[0, 2] = image_intrinsics[4] - K[1, 1] = image_intrinsics[3] - K[1, 2] = image_intrinsics[5] - K[2, 2] = 1 - intrinsics.append(K) - - image_pose = raw_pose[idx] - qvec = image_pose[:4] - qvec = qvec / np.linalg.norm(qvec) - w, x, y, z = qvec - R = np.array( - [ - [1 - 2 * y * y - 2 * z * z, 2 * x * y - 2 * z * w, 2 * x * z + 2 * y * w], - [2 * x * y + 2 * z * w, 1 - 2 * x * x - 2 * z * z, 2 * y * z - 2 * x * w], - [2 * x * z - 2 * y * w, 2 * y * z + 2 * x * w, 1 - 2 * x * x - 2 * y * y], - ] - ) - principal_axis.append(R[2, :]) - t = image_pose[4:7] - # World-to-Camera pose - current_pose = np.zeros([4, 4]) - current_pose[:3, :3] = R - current_pose[:3, 3] = t - current_pose[3, 3] = 1 - # Camera-to-World pose - # pose = np.zeros([4, 4]) - # pose[: 3, : 3] = np.transpose(R) - # pose[: 3, 3] = -np.matmul(np.transpose(R), t) - # pose[3, 3] = 1 - poses.append(current_pose) - - current_points3D_id_to_ndepth = {} - for point3D_id in points3D_id_to_2D[idx].keys(): - p3d = points3D[point3D_id] - current_points3D_id_to_ndepth[point3D_id] = (np.dot(R[2, :], p3d) + t[2]) / ( - 0.5 * (K[0, 0] + K[1, 1]) - ) - points3D_id_to_ndepth.append(current_points3D_id_to_ndepth) -principal_axis = np.array(principal_axis) -angles = np.rad2deg( - np.arccos(np.clip(np.dot(principal_axis, np.transpose(principal_axis)), -1, 1)) -) - -# Compute overlap score -overlap_matrix = np.full([n_images, n_images], -1.0) -scale_ratio_matrix = np.full([n_images, n_images], -1.0) -for idx1 in range(n_images): - if image_paths[idx1] is None or depth_paths[idx1] is None: - continue - for idx2 in range(idx1 + 1, n_images): - if image_paths[idx2] is None or depth_paths[idx2] is None: - continue - matches = points3D_id_to_2D[idx1].keys() & points3D_id_to_2D[idx2].keys() - min_num_points3D = min( - len(points3D_id_to_2D[idx1]), len(points3D_id_to_2D[idx2]) - ) - overlap_matrix[idx1, idx2] = len(matches) / len( - points3D_id_to_2D[idx1] - ) # min_num_points3D - overlap_matrix[idx2, idx1] = len(matches) / len( - points3D_id_to_2D[idx2] - ) # min_num_points3D - if len(matches) == 0: - continue - points3D_id_to_ndepth1 = points3D_id_to_ndepth[idx1] - points3D_id_to_ndepth2 = points3D_id_to_ndepth[idx2] - nd1 = np.array([points3D_id_to_ndepth1[match] for match in matches]) - nd2 = np.array([points3D_id_to_ndepth2[match] for match in matches]) - min_scale_ratio = np.min(np.maximum(nd1 / nd2, nd2 / nd1)) - scale_ratio_matrix[idx1, idx2] = min_scale_ratio - scale_ratio_matrix[idx2, idx1] = min_scale_ratio - 
-np.savez( - os.path.join(args.output_path, "%s.npz" % scene_id), - image_paths=image_paths, - depth_paths=depth_paths, - intrinsics=intrinsics, - poses=poses, - overlap_matrix=overlap_matrix, - scale_ratio_matrix=scale_ratio_matrix, - angles=angles, - n_points3D=n_points3D, - points3D_id_to_2D=points3D_id_to_2D, - points3D_id_to_ndepth=points3D_id_to_ndepth, -) diff --git a/spaces/Reeve/Ohayou_Face/models/mtcnn/mtcnn.py b/spaces/Reeve/Ohayou_Face/models/mtcnn/mtcnn.py deleted file mode 100644 index 4deacabaaf35e315c363c9eada9ff0c41f2561e5..0000000000000000000000000000000000000000 --- a/spaces/Reeve/Ohayou_Face/models/mtcnn/mtcnn.py +++ /dev/null @@ -1,156 +0,0 @@ -import numpy as np -import torch -from PIL import Image -from models.mtcnn.mtcnn_pytorch.src.get_nets import PNet, RNet, ONet -from models.mtcnn.mtcnn_pytorch.src.box_utils import nms, calibrate_box, get_image_boxes, convert_to_square -from models.mtcnn.mtcnn_pytorch.src.first_stage import run_first_stage -from models.mtcnn.mtcnn_pytorch.src.align_trans import get_reference_facial_points, warp_and_crop_face - -device = 'cuda:0' - - -class MTCNN(): - def __init__(self): - print(device) - self.pnet = PNet().to(device) - self.rnet = RNet().to(device) - self.onet = ONet().to(device) - self.pnet.eval() - self.rnet.eval() - self.onet.eval() - self.refrence = get_reference_facial_points(default_square=True) - - def align(self, img): - _, landmarks = self.detect_faces(img) - if len(landmarks) == 0: - return None, None - facial5points = [[landmarks[0][j], landmarks[0][j + 5]] for j in range(5)] - warped_face, tfm = warp_and_crop_face(np.array(img), facial5points, self.refrence, crop_size=(112, 112)) - return Image.fromarray(warped_face), tfm - - def align_multi(self, img, limit=None, min_face_size=30.0): - boxes, landmarks = self.detect_faces(img, min_face_size) - if limit: - boxes = boxes[:limit] - landmarks = landmarks[:limit] - faces = [] - tfms = [] - for landmark in landmarks: - facial5points = [[landmark[j], landmark[j + 5]] for j in range(5)] - warped_face, tfm = warp_and_crop_face(np.array(img), facial5points, self.refrence, crop_size=(112, 112)) - faces.append(Image.fromarray(warped_face)) - tfms.append(tfm) - return boxes, faces, tfms - - def detect_faces(self, image, min_face_size=20.0, - thresholds=[0.15, 0.25, 0.35], - nms_thresholds=[0.7, 0.7, 0.7]): - """ - Arguments: - image: an instance of PIL.Image. - min_face_size: a float number. - thresholds: a list of length 3. - nms_thresholds: a list of length 3. - - Returns: - two float numpy arrays of shapes [n_boxes, 4] and [n_boxes, 10], - bounding boxes and facial landmarks. 
- """ - - # BUILD AN IMAGE PYRAMID - width, height = image.size - min_length = min(height, width) - - min_detection_size = 12 - factor = 0.707 # sqrt(0.5) - - # scales for scaling the image - scales = [] - - # scales the image so that - # minimum size that we can detect equals to - # minimum face size that we want to detect - m = min_detection_size / min_face_size - min_length *= m - - factor_count = 0 - while min_length > min_detection_size: - scales.append(m * factor ** factor_count) - min_length *= factor - factor_count += 1 - - # STAGE 1 - - # it will be returned - bounding_boxes = [] - - with torch.no_grad(): - # run P-Net on different scales - for s in scales: - boxes = run_first_stage(image, self.pnet, scale=s, threshold=thresholds[0]) - bounding_boxes.append(boxes) - - # collect boxes (and offsets, and scores) from different scales - bounding_boxes = [i for i in bounding_boxes if i is not None] - bounding_boxes = np.vstack(bounding_boxes) - - keep = nms(bounding_boxes[:, 0:5], nms_thresholds[0]) - bounding_boxes = bounding_boxes[keep] - - # use offsets predicted by pnet to transform bounding boxes - bounding_boxes = calibrate_box(bounding_boxes[:, 0:5], bounding_boxes[:, 5:]) - # shape [n_boxes, 5] - - bounding_boxes = convert_to_square(bounding_boxes) - bounding_boxes[:, 0:4] = np.round(bounding_boxes[:, 0:4]) - - # STAGE 2 - - img_boxes = get_image_boxes(bounding_boxes, image, size=24) - img_boxes = torch.FloatTensor(img_boxes).to(device) - - output = self.rnet(img_boxes) - offsets = output[0].cpu().data.numpy() # shape [n_boxes, 4] - probs = output[1].cpu().data.numpy() # shape [n_boxes, 2] - - keep = np.where(probs[:, 1] > thresholds[1])[0] - bounding_boxes = bounding_boxes[keep] - bounding_boxes[:, 4] = probs[keep, 1].reshape((-1,)) - offsets = offsets[keep] - - keep = nms(bounding_boxes, nms_thresholds[1]) - bounding_boxes = bounding_boxes[keep] - bounding_boxes = calibrate_box(bounding_boxes, offsets[keep]) - bounding_boxes = convert_to_square(bounding_boxes) - bounding_boxes[:, 0:4] = np.round(bounding_boxes[:, 0:4]) - - # STAGE 3 - - img_boxes = get_image_boxes(bounding_boxes, image, size=48) - if len(img_boxes) == 0: - return [], [] - img_boxes = torch.FloatTensor(img_boxes).to(device) - output = self.onet(img_boxes) - landmarks = output[0].cpu().data.numpy() # shape [n_boxes, 10] - offsets = output[1].cpu().data.numpy() # shape [n_boxes, 4] - probs = output[2].cpu().data.numpy() # shape [n_boxes, 2] - - keep = np.where(probs[:, 1] > thresholds[2])[0] - bounding_boxes = bounding_boxes[keep] - bounding_boxes[:, 4] = probs[keep, 1].reshape((-1,)) - offsets = offsets[keep] - landmarks = landmarks[keep] - - # compute landmark points - width = bounding_boxes[:, 2] - bounding_boxes[:, 0] + 1.0 - height = bounding_boxes[:, 3] - bounding_boxes[:, 1] + 1.0 - xmin, ymin = bounding_boxes[:, 0], bounding_boxes[:, 1] - landmarks[:, 0:5] = np.expand_dims(xmin, 1) + np.expand_dims(width, 1) * landmarks[:, 0:5] - landmarks[:, 5:10] = np.expand_dims(ymin, 1) + np.expand_dims(height, 1) * landmarks[:, 5:10] - - bounding_boxes = calibrate_box(bounding_boxes, offsets) - keep = nms(bounding_boxes, nms_thresholds[2], mode='min') - bounding_boxes = bounding_boxes[keep] - landmarks = landmarks[keep] - - return bounding_boxes, landmarks diff --git a/spaces/Ricdeq/optimaldesign/README.md b/spaces/Ricdeq/optimaldesign/README.md deleted file mode 100644 index 25fc302441d243453be82ffef2c16619f7a559f2..0000000000000000000000000000000000000000 --- a/spaces/Ricdeq/optimaldesign/README.md +++ /dev/null 
@@ -1,13 +0,0 @@ ---- -title: Optimaldesign -emoji: 📊 -colorFrom: yellow -colorTo: blue -sdk: gradio -sdk_version: 3.34.0 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Ricecake123/RVC-demo/lib/infer_pack/attentions.py b/spaces/Ricecake123/RVC-demo/lib/infer_pack/attentions.py deleted file mode 100644 index 05501be1871643f78dddbeaa529c96667031a8db..0000000000000000000000000000000000000000 --- a/spaces/Ricecake123/RVC-demo/lib/infer_pack/attentions.py +++ /dev/null @@ -1,417 +0,0 @@ -import copy -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -from lib.infer_pack import commons -from lib.infer_pack import modules -from lib.infer_pack.modules import LayerNorm - - -class Encoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - window_size=10, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - window_size=window_size, - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - proximal_bias=False, - proximal_init=True, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - proximal_bias=proximal_bias, - proximal_init=proximal_init, - ) - ) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append( - MultiHeadAttention( - hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - 
FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - causal=True, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to( - device=x.device, dtype=x.dtype - ) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__( - self, - channels, - out_channels, - n_heads, - p_dropout=0.0, - window_size=None, - heads_share=True, - block_length=None, - proximal_bias=False, - proximal_init=False, - ): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - self.emb_rel_v = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert ( - t_s == t_t - ), "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys( - query / math.sqrt(self.k_channels), key_relative_embeddings - ) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." 
- scores = scores + self._attention_bias_proximal(t_s).to( - device=scores.device, dtype=scores.dtype - ) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert ( - t_s == t_t - ), "Local attention is only available for self-attention." - block_mask = ( - torch.ones_like(scores) - .triu(-self.block_length) - .tril(self.block_length) - ) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings( - self.emb_rel_v, t_s - ) - output = output + self._matmul_with_relative_values( - relative_weights, value_relative_embeddings - ) - output = ( - output.transpose(2, 3).contiguous().view(b, d, t_t) - ) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]), - ) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[ - :, slice_start_position:slice_end_position - ] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad( - x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]) - ) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[ - :, :, :length, length - 1 : - ] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad( - x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]) - ) - x_flat = x.view([batch, heads, length**2 + length * (length - 1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. 
- Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__( - self, - in_channels, - out_channels, - filter_channels, - kernel_size, - p_dropout=0.0, - activation=None, - causal=False, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/cnn/bricks/hswish.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/cnn/bricks/hswish.py deleted file mode 100644 index 7e0c090ff037c99ee6c5c84c4592e87beae02208..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/cnn/bricks/hswish.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch.nn as nn - -from .registry import ACTIVATION_LAYERS - - -@ACTIVATION_LAYERS.register_module() -class HSwish(nn.Module): - """Hard Swish Module. - - This module applies the hard swish function: - - .. math:: - Hswish(x) = x * ReLU6(x + 3) / 6 - - Args: - inplace (bool): can optionally do the operation in-place. - Default: False. - - Returns: - Tensor: The output tensor. - """ - - def __init__(self, inplace=False): - super(HSwish, self).__init__() - self.act = nn.ReLU6(inplace) - - def forward(self, x): - return x * self.act(x + 3) / 6 diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/parallel/collate.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/parallel/collate.py deleted file mode 100644 index ad749197df21b0d74297548be5f66a696adebf7f..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/parallel/collate.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from collections.abc import Mapping, Sequence - -import torch -import torch.nn.functional as F -from torch.utils.data.dataloader import default_collate - -from .data_container import DataContainer - - -def collate(batch, samples_per_gpu=1): - """Puts each data field into a tensor/DataContainer with outer dimension - batch size. - - Extend default_collate to add support for - :type:`~mmcv.parallel.DataContainer`. There are 3 cases. 
- - 1. cpu_only = True, e.g., meta data - 2. cpu_only = False, stack = True, e.g., images tensors - 3. cpu_only = False, stack = False, e.g., gt bboxes - """ - - if not isinstance(batch, Sequence): - raise TypeError(f'{batch.dtype} is not supported.') - - if isinstance(batch[0], DataContainer): - stacked = [] - if batch[0].cpu_only: - for i in range(0, len(batch), samples_per_gpu): - stacked.append( - [sample.data for sample in batch[i:i + samples_per_gpu]]) - return DataContainer( - stacked, batch[0].stack, batch[0].padding_value, cpu_only=True) - elif batch[0].stack: - for i in range(0, len(batch), samples_per_gpu): - assert isinstance(batch[i].data, torch.Tensor) - - if batch[i].pad_dims is not None: - ndim = batch[i].dim() - assert ndim > batch[i].pad_dims - max_shape = [0 for _ in range(batch[i].pad_dims)] - for dim in range(1, batch[i].pad_dims + 1): - max_shape[dim - 1] = batch[i].size(-dim) - for sample in batch[i:i + samples_per_gpu]: - for dim in range(0, ndim - batch[i].pad_dims): - assert batch[i].size(dim) == sample.size(dim) - for dim in range(1, batch[i].pad_dims + 1): - max_shape[dim - 1] = max(max_shape[dim - 1], - sample.size(-dim)) - padded_samples = [] - for sample in batch[i:i + samples_per_gpu]: - pad = [0 for _ in range(batch[i].pad_dims * 2)] - for dim in range(1, batch[i].pad_dims + 1): - pad[2 * dim - - 1] = max_shape[dim - 1] - sample.size(-dim) - padded_samples.append( - F.pad( - sample.data, pad, value=sample.padding_value)) - stacked.append(default_collate(padded_samples)) - elif batch[i].pad_dims is None: - stacked.append( - default_collate([ - sample.data - for sample in batch[i:i + samples_per_gpu] - ])) - else: - raise ValueError( - 'pad_dims should be either None or integers (1-3)') - - else: - for i in range(0, len(batch), samples_per_gpu): - stacked.append( - [sample.data for sample in batch[i:i + samples_per_gpu]]) - return DataContainer(stacked, batch[0].stack, batch[0].padding_value) - elif isinstance(batch[0], Sequence): - transposed = zip(*batch) - return [collate(samples, samples_per_gpu) for samples in transposed] - elif isinstance(batch[0], Mapping): - return { - key: collate([d[key] for d in batch], samples_per_gpu) - for key in batch[0] - } - else: - return default_collate(batch) diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/dense_heads/rpn_head.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/dense_heads/rpn_head.py deleted file mode 100644 index a888cb8c188ca6fe63045b6230266553fbe8c996..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/dense_heads/rpn_head.py +++ /dev/null @@ -1,236 +0,0 @@ -import copy -import warnings - -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv import ConfigDict -from mmcv.cnn import normal_init -from mmcv.ops import batched_nms - -from ..builder import HEADS -from .anchor_head import AnchorHead -from .rpn_test_mixin import RPNTestMixin - - -@HEADS.register_module() -class RPNHead(RPNTestMixin, AnchorHead): - """RPN head. - - Args: - in_channels (int): Number of channels in the input feature map. 
- """ # noqa: W605 - - def __init__(self, in_channels, **kwargs): - super(RPNHead, self).__init__(1, in_channels, **kwargs) - - def _init_layers(self): - """Initialize layers of the head.""" - self.rpn_conv = nn.Conv2d( - self.in_channels, self.feat_channels, 3, padding=1) - self.rpn_cls = nn.Conv2d(self.feat_channels, - self.num_anchors * self.cls_out_channels, 1) - self.rpn_reg = nn.Conv2d(self.feat_channels, self.num_anchors * 4, 1) - - def init_weights(self): - """Initialize weights of the head.""" - normal_init(self.rpn_conv, std=0.01) - normal_init(self.rpn_cls, std=0.01) - normal_init(self.rpn_reg, std=0.01) - - def forward_single(self, x): - """Forward feature map of a single scale level.""" - x = self.rpn_conv(x) - x = F.relu(x, inplace=True) - rpn_cls_score = self.rpn_cls(x) - rpn_bbox_pred = self.rpn_reg(x) - return rpn_cls_score, rpn_bbox_pred - - def loss(self, - cls_scores, - bbox_preds, - gt_bboxes, - img_metas, - gt_bboxes_ignore=None): - """Compute losses of the head. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level - Has shape (N, num_anchors * num_classes, H, W) - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (N, num_anchors * 4, H, W) - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - losses = super(RPNHead, self).loss( - cls_scores, - bbox_preds, - gt_bboxes, - None, - img_metas, - gt_bboxes_ignore=gt_bboxes_ignore) - return dict( - loss_rpn_cls=losses['loss_cls'], loss_rpn_bbox=losses['loss_bbox']) - - def _get_bboxes(self, - cls_scores, - bbox_preds, - mlvl_anchors, - img_shapes, - scale_factors, - cfg, - rescale=False): - """Transform outputs for a single batch item into bbox predictions. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level - Has shape (N, num_anchors * num_classes, H, W). - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (N, num_anchors * 4, H, W). - mlvl_anchors (list[Tensor]): Box reference for each scale level - with shape (num_total_anchors, 4). - img_shapes (list[tuple[int]]): Shape of the input image, - (height, width, 3). - scale_factors (list[ndarray]): Scale factor of the image arange as - (w_scale, h_scale, w_scale, h_scale). - cfg (mmcv.Config): Test / postprocessing configuration, - if None, test_cfg would be used. - rescale (bool): If True, return boxes in original image space. - - Returns: - list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. - The first item is an (n, 5) tensor, where the first 4 columns - are bounding box positions (tl_x, tl_y, br_x, br_y) and the - 5-th column is a score between 0 and 1. The second item is a - (n,) tensor where each item is the predicted class labelof the - corresponding box. 
- """ - cfg = self.test_cfg if cfg is None else cfg - cfg = copy.deepcopy(cfg) - # bboxes from different level should be independent during NMS, - # level_ids are used as labels for batched NMS to separate them - level_ids = [] - mlvl_scores = [] - mlvl_bbox_preds = [] - mlvl_valid_anchors = [] - batch_size = cls_scores[0].shape[0] - nms_pre_tensor = torch.tensor( - cfg.nms_pre, device=cls_scores[0].device, dtype=torch.long) - for idx in range(len(cls_scores)): - rpn_cls_score = cls_scores[idx] - rpn_bbox_pred = bbox_preds[idx] - assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:] - rpn_cls_score = rpn_cls_score.permute(0, 2, 3, 1) - if self.use_sigmoid_cls: - rpn_cls_score = rpn_cls_score.reshape(batch_size, -1) - scores = rpn_cls_score.sigmoid() - else: - rpn_cls_score = rpn_cls_score.reshape(batch_size, -1, 2) - # We set FG labels to [0, num_class-1] and BG label to - # num_class in RPN head since mmdet v2.5, which is unified to - # be consistent with other head since mmdet v2.0. In mmdet v2.0 - # to v2.4 we keep BG label as 0 and FG label as 1 in rpn head. - scores = rpn_cls_score.softmax(-1)[..., 0] - rpn_bbox_pred = rpn_bbox_pred.permute(0, 2, 3, 1).reshape( - batch_size, -1, 4) - anchors = mlvl_anchors[idx] - anchors = anchors.expand_as(rpn_bbox_pred) - if nms_pre_tensor > 0: - # sort is faster than topk - # _, topk_inds = scores.topk(cfg.nms_pre) - # keep topk op for dynamic k in onnx model - if torch.onnx.is_in_onnx_export(): - # sort op will be converted to TopK in onnx - # and k<=3480 in TensorRT - scores_shape = torch._shape_as_tensor(scores) - nms_pre = torch.where(scores_shape[1] < nms_pre_tensor, - scores_shape[1], nms_pre_tensor) - _, topk_inds = scores.topk(nms_pre) - batch_inds = torch.arange(batch_size).view( - -1, 1).expand_as(topk_inds) - scores = scores[batch_inds, topk_inds] - rpn_bbox_pred = rpn_bbox_pred[batch_inds, topk_inds, :] - anchors = anchors[batch_inds, topk_inds, :] - - elif scores.shape[-1] > cfg.nms_pre: - ranked_scores, rank_inds = scores.sort(descending=True) - topk_inds = rank_inds[:, :cfg.nms_pre] - scores = ranked_scores[:, :cfg.nms_pre] - batch_inds = torch.arange(batch_size).view( - -1, 1).expand_as(topk_inds) - rpn_bbox_pred = rpn_bbox_pred[batch_inds, topk_inds, :] - anchors = anchors[batch_inds, topk_inds, :] - - mlvl_scores.append(scores) - mlvl_bbox_preds.append(rpn_bbox_pred) - mlvl_valid_anchors.append(anchors) - level_ids.append( - scores.new_full(( - batch_size, - scores.size(1), - ), - idx, - dtype=torch.long)) - - batch_mlvl_scores = torch.cat(mlvl_scores, dim=1) - batch_mlvl_anchors = torch.cat(mlvl_valid_anchors, dim=1) - batch_mlvl_rpn_bbox_pred = torch.cat(mlvl_bbox_preds, dim=1) - batch_mlvl_proposals = self.bbox_coder.decode( - batch_mlvl_anchors, batch_mlvl_rpn_bbox_pred, max_shape=img_shapes) - batch_mlvl_ids = torch.cat(level_ids, dim=1) - - # deprecate arguments warning - if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg: - warnings.warn( - 'In rpn_proposal or test_cfg, ' - 'nms_thr has been moved to a dict named nms as ' - 'iou_threshold, max_num has been renamed as max_per_img, ' - 'name of original arguments and the way to specify ' - 'iou_threshold of NMS will be deprecated.') - if 'nms' not in cfg: - cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr)) - if 'max_num' in cfg: - if 'max_per_img' in cfg: - assert cfg.max_num == cfg.max_per_img, f'You ' \ - f'set max_num and ' \ - f'max_per_img at the same time, but get {cfg.max_num} ' \ - f'and {cfg.max_per_img} respectively' \ - 'Please 
delete max_num which will be deprecated.' - else: - cfg.max_per_img = cfg.max_num - if 'nms_thr' in cfg: - assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set' \ - f' iou_threshold in nms and ' \ - f'nms_thr at the same time, but get' \ - f' {cfg.nms.iou_threshold} and {cfg.nms_thr}' \ - f' respectively. Please delete the nms_thr ' \ - f'which will be deprecated.' - - result_list = [] - for (mlvl_proposals, mlvl_scores, - mlvl_ids) in zip(batch_mlvl_proposals, batch_mlvl_scores, - batch_mlvl_ids): - # Skip nonzero op while exporting to ONNX - if cfg.min_bbox_size > 0 and (not torch.onnx.is_in_onnx_export()): - w = mlvl_proposals[:, 2] - mlvl_proposals[:, 0] - h = mlvl_proposals[:, 3] - mlvl_proposals[:, 1] - valid_ind = torch.nonzero( - (w >= cfg.min_bbox_size) - & (h >= cfg.min_bbox_size), - as_tuple=False).squeeze() - if valid_ind.sum().item() != len(mlvl_proposals): - mlvl_proposals = mlvl_proposals[valid_ind, :] - mlvl_scores = mlvl_scores[valid_ind] - mlvl_ids = mlvl_ids[valid_ind] - - dets, keep = batched_nms(mlvl_proposals, mlvl_scores, mlvl_ids, - cfg.nms) - result_list.append(dets[:cfg.max_per_img]) - return result_list diff --git a/spaces/Rongjiehuang/GenerSpeech/utils/cwt.py b/spaces/Rongjiehuang/GenerSpeech/utils/cwt.py deleted file mode 100644 index 1a08461b9e422aac614438e6240b7355b8e4bb2c..0000000000000000000000000000000000000000 --- a/spaces/Rongjiehuang/GenerSpeech/utils/cwt.py +++ /dev/null @@ -1,146 +0,0 @@ -import librosa -import numpy as np -from pycwt import wavelet -from scipy.interpolate import interp1d - - -def load_wav(wav_file, sr): - wav, _ = librosa.load(wav_file, sr=sr, mono=True) - return wav - - -def convert_continuos_f0(f0): - '''CONVERT F0 TO CONTINUOUS F0 - Args: - f0 (ndarray): original f0 sequence with the shape (T) - Return: - (ndarray): continuous f0 with the shape (T) - ''' - # get uv information as binary - f0 = np.copy(f0) - uv = np.float32(f0 != 0) - - # get start and end of f0 - if (f0 == 0).all(): - print("| all of the f0 values are 0.") - return uv, f0 - start_f0 = f0[f0 != 0][0] - end_f0 = f0[f0 != 0][-1] - - # padding start and end of f0 sequence - start_idx = np.where(f0 == start_f0)[0][0] - end_idx = np.where(f0 == end_f0)[0][-1] - f0[:start_idx] = start_f0 - f0[end_idx:] = end_f0 - - # get non-zero frame index - nz_frames = np.where(f0 != 0)[0] - - # perform linear interpolation - f = interp1d(nz_frames, f0[nz_frames]) - cont_f0 = f(np.arange(0, f0.shape[0])) - - return uv, cont_f0 - - -def get_cont_lf0(f0, frame_period=5.0): - uv, cont_f0_lpf = convert_continuos_f0(f0) - # cont_f0_lpf = low_pass_filter(cont_f0_lpf, int(1.0 / (frame_period * 0.001)), cutoff=20) - cont_lf0_lpf = np.log(cont_f0_lpf) - return uv, cont_lf0_lpf - - -def get_lf0_cwt(lf0): - ''' - input: - signal of shape (N) - output: - Wavelet_lf0 of shape(10, N), scales of shape(10) - ''' - mother = wavelet.MexicanHat() - dt = 0.005 - dj = 1 - s0 = dt * 2 - J = 9 - - Wavelet_lf0, scales, _, _, _, _ = wavelet.cwt(np.squeeze(lf0), dt, dj, s0, J, mother) - # Wavelet.shape => (J + 1, len(lf0)) - Wavelet_lf0 = np.real(Wavelet_lf0).T - return Wavelet_lf0, scales - - -def norm_scale(Wavelet_lf0): - Wavelet_lf0_norm = np.zeros((Wavelet_lf0.shape[0], Wavelet_lf0.shape[1])) - mean = Wavelet_lf0.mean(0)[None, :] - std = Wavelet_lf0.std(0)[None, :] - Wavelet_lf0_norm = (Wavelet_lf0 - mean) / std - return Wavelet_lf0_norm, mean, std - - -def normalize_cwt_lf0(f0, mean, std): - uv, cont_lf0_lpf = get_cont_lf0(f0) - cont_lf0_norm = (cont_lf0_lpf - mean) / std - Wavelet_lf0, scales = 
get_lf0_cwt(cont_lf0_norm) - Wavelet_lf0_norm, _, _ = norm_scale(Wavelet_lf0) - - return Wavelet_lf0_norm - - -def get_lf0_cwt_norm(f0s, mean, std): - uvs = list() - cont_lf0_lpfs = list() - cont_lf0_lpf_norms = list() - Wavelet_lf0s = list() - Wavelet_lf0s_norm = list() - scaless = list() - - means = list() - stds = list() - for f0 in f0s: - uv, cont_lf0_lpf = get_cont_lf0(f0) - cont_lf0_lpf_norm = (cont_lf0_lpf - mean) / std - - Wavelet_lf0, scales = get_lf0_cwt(cont_lf0_lpf_norm) # [560,10] - Wavelet_lf0_norm, mean_scale, std_scale = norm_scale(Wavelet_lf0) # [560,10],[1,10],[1,10] - - Wavelet_lf0s_norm.append(Wavelet_lf0_norm) - uvs.append(uv) - cont_lf0_lpfs.append(cont_lf0_lpf) - cont_lf0_lpf_norms.append(cont_lf0_lpf_norm) - Wavelet_lf0s.append(Wavelet_lf0) - scaless.append(scales) - means.append(mean_scale) - stds.append(std_scale) - - return Wavelet_lf0s_norm, scaless, means, stds - - -def inverse_cwt_torch(Wavelet_lf0, scales): - import torch - b = ((torch.arange(0, len(scales)).float().to(Wavelet_lf0.device)[None, None, :] + 1 + 2.5) ** (-2.5)) - lf0_rec = Wavelet_lf0 * b - lf0_rec_sum = lf0_rec.sum(-1) - lf0_rec_sum = (lf0_rec_sum - lf0_rec_sum.mean(-1, keepdim=True)) / lf0_rec_sum.std(-1, keepdim=True) - return lf0_rec_sum - - -def inverse_cwt(Wavelet_lf0, scales): - b = ((np.arange(0, len(scales))[None, None, :] + 1 + 2.5) ** (-2.5)) - lf0_rec = Wavelet_lf0 * b - lf0_rec_sum = lf0_rec.sum(-1) - lf0_rec_sum = (lf0_rec_sum - lf0_rec_sum.mean(-1, keepdims=True)) / lf0_rec_sum.std(-1, keepdims=True) - return lf0_rec_sum - - -def cwt2f0(cwt_spec, mean, std, cwt_scales): - assert len(mean.shape) == 1 and len(std.shape) == 1 and len(cwt_spec.shape) == 3 - import torch - if isinstance(cwt_spec, torch.Tensor): - f0 = inverse_cwt_torch(cwt_spec, cwt_scales) - f0 = f0 * std[:, None] + mean[:, None] - f0 = f0.exp() # [B, T] - else: - f0 = inverse_cwt(cwt_spec, cwt_scales) - f0 = f0 * std[:, None] + mean[:, None] - f0 = np.exp(f0) # [B, T] - return f0 diff --git a/spaces/SAAZIZI/SummarizeAV/resource_loader/__init__.py b/spaces/SAAZIZI/SummarizeAV/resource_loader/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Salesforce/EDICT/my_diffusers/pipelines/stochastic_karras_ve/__init__.py b/spaces/Salesforce/EDICT/my_diffusers/pipelines/stochastic_karras_ve/__init__.py deleted file mode 100644 index db2582043781130794e01b96b3e6beecbfe9f369..0000000000000000000000000000000000000000 --- a/spaces/Salesforce/EDICT/my_diffusers/pipelines/stochastic_karras_ve/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# flake8: noqa -from .pipeline_stochastic_karras_ve import KarrasVePipeline diff --git a/spaces/Salesforce/EDICT/my_half_diffusers/pipelines/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py b/spaces/Salesforce/EDICT/my_half_diffusers/pipelines/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py deleted file mode 100644 index 4979d88feee933483ac49c5cf71eef590d8fb34c..0000000000000000000000000000000000000000 --- a/spaces/Salesforce/EDICT/my_half_diffusers/pipelines/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py +++ /dev/null @@ -1,108 +0,0 @@ -import inspect -import warnings -from typing import Optional, Tuple, Union - -import torch - -from ...models import UNet2DModel, VQModel -from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput -from ...schedulers import DDIMScheduler - - -class LDMPipeline(DiffusionPipeline): - r""" - This model inherits from 
[`DiffusionPipeline`]. Check the superclass documentation for the generic methods the - library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) - - Parameters: - vqvae ([`VQModel`]): - Vector-quantized (VQ) Model to encode and decode images to and from latent representations. - unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image latents. - scheduler ([`SchedulerMixin`]): - [`DDIMScheduler`] is to be used in combination with `unet` to denoise the encoded image latens. - """ - - def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler): - super().__init__() - scheduler = scheduler.set_format("pt") - self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler) - - @torch.no_grad() - def __call__( - self, - batch_size: int = 1, - generator: Optional[torch.Generator] = None, - eta: float = 0.0, - num_inference_steps: int = 50, - output_type: Optional[str] = "pil", - return_dict: bool = True, - **kwargs, - ) -> Union[Tuple, ImagePipelineOutput]: - - r""" - Args: - batch_size (`int`, *optional*, defaults to 1): - Number of images to generate. - generator (`torch.Generator`, *optional*): - A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation - deterministic. - num_inference_steps (`int`, *optional*, defaults to 50): - The number of denoising steps. More denoising steps usually lead to a higher quality image at the - expense of slower inference. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generate image. Choose between - [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `nd.array`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipeline_utils.ImagePipelineOutput`] instead of a plain tuple. - - Returns: - [`~pipeline_utils.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if - `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the - generated images. - """ - - if "torch_device" in kwargs: - device = kwargs.pop("torch_device") - warnings.warn( - "`torch_device` is deprecated as an input argument to `__call__` and will be removed in v0.3.0." - " Consider using `pipe.to(torch_device)` instead." 
- ) - - # Set device as before (to be removed in 0.3.0) - if device is None: - device = "cuda" if torch.cuda.is_available() else "cpu" - self.to(device) - - latents = torch.randn( - (batch_size, self.unet.in_channels, self.unet.sample_size, self.unet.sample_size), - generator=generator, - ) - latents = latents.to(self.device) - - self.scheduler.set_timesteps(num_inference_steps) - - # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature - accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) - - extra_kwargs = {} - if accepts_eta: - extra_kwargs["eta"] = eta - - for t in self.progress_bar(self.scheduler.timesteps): - # predict the noise residual - noise_prediction = self.unet(latents, t).sample - # compute the previous noisy sample x_t -> x_t-1 - latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample - - # decode the image latents with the VAE - image = self.vqvae.decode(latents).sample - - image = (image / 2 + 0.5).clamp(0, 1) - image = image.cpu().permute(0, 2, 3, 1).numpy() - if output_type == "pil": - image = self.numpy_to_pil(image) - - if not return_dict: - return (image,) - - return ImagePipelineOutput(images=image) diff --git a/spaces/SaulLu/bloom-generations-viewer/README.md b/spaces/SaulLu/bloom-generations-viewer/README.md deleted file mode 100644 index bc7e92f7335fe218b4211721602d540e43844892..0000000000000000000000000000000000000000 --- a/spaces/SaulLu/bloom-generations-viewer/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Bloom Generations Viewer -emoji: 🌍 -colorFrom: gray -colorTo: purple -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ServerX/PorcoDiaz/demucs/separate.py b/spaces/ServerX/PorcoDiaz/demucs/separate.py deleted file mode 100644 index 3fc7af9e711978b3e21398aa6f1deb9ae87dd370..0000000000000000000000000000000000000000 --- a/spaces/ServerX/PorcoDiaz/demucs/separate.py +++ /dev/null @@ -1,185 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import argparse -import sys -from pathlib import Path -import subprocess - -import julius -import torch as th -import torchaudio as ta - -from .audio import AudioFile, convert_audio_channels -from .pretrained import is_pretrained, load_pretrained -from .utils import apply_model, load_model - - -def load_track(track, device, audio_channels, samplerate): - errors = {} - wav = None - - try: - wav = AudioFile(track).read( - streams=0, - samplerate=samplerate, - channels=audio_channels).to(device) - except FileNotFoundError: - errors['ffmpeg'] = 'Ffmpeg is not installed.' - except subprocess.CalledProcessError: - errors['ffmpeg'] = 'FFmpeg could not read the file.' - - if wav is None: - try: - wav, sr = ta.load(str(track)) - except RuntimeError as err: - errors['torchaudio'] = err.args[0] - else: - wav = convert_audio_channels(wav, audio_channels) - wav = wav.to(device) - wav = julius.resample_frac(wav, sr, samplerate) - - if wav is None: - print(f"Could not load file {track}. " - "Maybe it is not a supported file format? 
") - for backend, error in errors.items(): - print(f"When trying to load using {backend}, got the following error: {error}") - sys.exit(1) - return wav - - -def encode_mp3(wav, path, bitrate=320, samplerate=44100, channels=2, verbose=False): - try: - import lameenc - except ImportError: - print("Failed to call lame encoder. Maybe it is not installed? " - "On windows, run `python.exe -m pip install -U lameenc`, " - "on OSX/Linux, run `python3 -m pip install -U lameenc`, " - "then try again.", file=sys.stderr) - sys.exit(1) - encoder = lameenc.Encoder() - encoder.set_bit_rate(bitrate) - encoder.set_in_sample_rate(samplerate) - encoder.set_channels(channels) - encoder.set_quality(2) # 2-highest, 7-fastest - if not verbose: - encoder.silence() - wav = wav.transpose(0, 1).numpy() - mp3_data = encoder.encode(wav.tobytes()) - mp3_data += encoder.flush() - with open(path, "wb") as f: - f.write(mp3_data) - - -def main(): - parser = argparse.ArgumentParser("demucs.separate", - description="Separate the sources for the given tracks") - parser.add_argument("tracks", nargs='+', type=Path, default=[], help='Path to tracks') - parser.add_argument("-n", - "--name", - default="demucs_quantized", - help="Model name. See README.md for the list of pretrained models. " - "Default is demucs_quantized.") - parser.add_argument("-v", "--verbose", action="store_true") - parser.add_argument("-o", - "--out", - type=Path, - default=Path("separated"), - help="Folder where to put extracted tracks. A subfolder " - "with the model name will be created.") - parser.add_argument("--models", - type=Path, - default=Path("models"), - help="Path to trained models. " - "Also used to store downloaded pretrained models") - parser.add_argument("-d", - "--device", - default="cuda" if th.cuda.is_available() else "cpu", - help="Device to use, default is cuda if available else cpu") - parser.add_argument("--shifts", - default=0, - type=int, - help="Number of random shifts for equivariant stabilization." - "Increase separation time but improves quality for Demucs. 10 was used " - "in the original paper.") - parser.add_argument("--overlap", - default=0.25, - type=float, - help="Overlap between the splits.") - parser.add_argument("--no-split", - action="store_false", - dest="split", - default=True, - help="Doesn't split audio in chunks. This can use large amounts of memory.") - parser.add_argument("--float32", - action="store_true", - help="Convert the output wavefile to use pcm f32 format instead of s16. " - "This should not make a difference if you just plan on listening to the " - "audio but might be needed to compute exactly metrics like SDR etc.") - parser.add_argument("--int16", - action="store_false", - dest="float32", - help="Opposite of --float32, here for compatibility.") - parser.add_argument("--mp3", action="store_true", - help="Convert the output wavs to mp3.") - parser.add_argument("--mp3-bitrate", - default=320, - type=int, - help="Bitrate of converted mp3.") - - args = parser.parse_args() - name = args.name + ".th" - model_path = args.models / name - if model_path.is_file(): - model = load_model(model_path) - else: - if is_pretrained(args.name): - model = load_pretrained(args.name) - else: - print(f"No pre-trained model {args.name}", file=sys.stderr) - sys.exit(1) - model.to(args.device) - - out = args.out / args.name - out.mkdir(parents=True, exist_ok=True) - print(f"Separated tracks will be stored in {out.resolve()}") - for track in args.tracks: - if not track.exists(): - print( - f"File {track} does not exist. 
If the path contains spaces, " - "please try again after surrounding the entire path with quotes \"\".", - file=sys.stderr) - continue - print(f"Separating track {track}") - wav = load_track(track, args.device, model.audio_channels, model.samplerate) - - ref = wav.mean(0) - wav = (wav - ref.mean()) / ref.std() - sources = apply_model(model, wav, shifts=args.shifts, split=args.split, - overlap=args.overlap, progress=True) - sources = sources * ref.std() + ref.mean() - - track_folder = out / track.name.rsplit(".", 1)[0] - track_folder.mkdir(exist_ok=True) - for source, name in zip(sources, model.sources): - source = source / max(1.01 * source.abs().max(), 1) - if args.mp3 or not args.float32: - source = (source * 2**15).clamp_(-2**15, 2**15 - 1).short() - source = source.cpu() - stem = str(track_folder / name) - if args.mp3: - encode_mp3(source, stem + ".mp3", - bitrate=args.mp3_bitrate, - samplerate=model.samplerate, - channels=model.audio_channels, - verbose=args.verbose) - else: - wavname = str(track_folder / f"{name}.wav") - ta.save(wavname, source, sample_rate=model.samplerate) - - -if __name__ == "__main__": - main() diff --git a/spaces/Silentlin/DiffSinger/vocoders/base_vocoder.py b/spaces/Silentlin/DiffSinger/vocoders/base_vocoder.py deleted file mode 100644 index fe49a9e4f790ecdc5e76d60a23f96602b59fc48d..0000000000000000000000000000000000000000 --- a/spaces/Silentlin/DiffSinger/vocoders/base_vocoder.py +++ /dev/null @@ -1,39 +0,0 @@ -import importlib -VOCODERS = {} - - -def register_vocoder(cls): - VOCODERS[cls.__name__.lower()] = cls - VOCODERS[cls.__name__] = cls - return cls - - -def get_vocoder_cls(hparams): - if hparams['vocoder'] in VOCODERS: - return VOCODERS[hparams['vocoder']] - else: - vocoder_cls = hparams['vocoder'] - pkg = ".".join(vocoder_cls.split(".")[:-1]) - cls_name = vocoder_cls.split(".")[-1] - vocoder_cls = getattr(importlib.import_module(pkg), cls_name) - return vocoder_cls - - -class BaseVocoder: - def spec2wav(self, mel): - """ - - :param mel: [T, 80] - :return: wav: [T'] - """ - - raise NotImplementedError - - @staticmethod - def wav2spec(wav_fn): - """ - - :param wav_fn: str - :return: wav, mel: [T, 80] - """ - raise NotImplementedError diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/driver/options.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/driver/options.py deleted file mode 100644 index 3260fb74f9bacd12e0606a49229beb0459904f1a..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/driver/options.py +++ /dev/null @@ -1,36 +0,0 @@ -from clickhouse_connect.driver.exceptions import NotSupportedError - -try: - import numpy as np -except ImportError: - np = None - -try: - import pandas as pd - pd_extended_dtypes = not pd.__version__.startswith('0') -except ImportError: - pd = None - pd_extended_dtypes = False - -try: - import pyarrow as arrow -except ImportError: - arrow = None - - -def check_numpy(): - if np: - return np - raise NotSupportedError('Numpy package is not installed') - - -def check_pandas(): - if pd: - return pd - raise NotSupportedError('Pandas package is not installed') - - -def check_arrow(): - if arrow: - return arrow - raise NotSupportedError('PyArrow package is not installed') diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/pydevd_attach_to_process/winappdbg/sql.py 
b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/pydevd_attach_to_process/winappdbg/sql.py deleted file mode 100644 index d97411059928e40cec3daca90478fbfd96107ced..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/pydevd_attach_to_process/winappdbg/sql.py +++ /dev/null @@ -1,993 +0,0 @@ -#!~/.wine/drive_c/Python25/python.exe -# -*- coding: utf-8 -*- - -# Copyright (c) 2009-2014, Mario Vilas -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright notice, -# this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice,this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of the copyright holder nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. - -""" -SQL database storage support. - -@group Crash reporting: - CrashDAO -""" - -__revision__ = "$Id$" - -__all__ = ['CrashDAO'] - -import sqlite3 -import datetime -import warnings - -from sqlalchemy import create_engine, Column, ForeignKey, Sequence -from sqlalchemy.engine.url import URL -from sqlalchemy.ext.compiler import compiles -from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy.interfaces import PoolListener -from sqlalchemy.orm import sessionmaker, deferred -from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound -from sqlalchemy.types import Integer, BigInteger, Boolean, DateTime, String, \ - LargeBinary, Enum, VARCHAR -from sqlalchemy.sql.expression import asc, desc - -from crash import Crash, Marshaller, pickle, HIGHEST_PROTOCOL -from textio import CrashDump -import win32 - -#------------------------------------------------------------------------------ - -try: - from decorator import decorator -except ImportError: - import functools - def decorator(w): - """ - The C{decorator} module was not found. 
You can install it from: - U{http://pypi.python.org/pypi/decorator/} - """ - def d(fn): - @functools.wraps(fn) - def x(*argv, **argd): - return w(fn, *argv, **argd) - return x - return d - -#------------------------------------------------------------------------------ - -@compiles(String, 'mysql') -@compiles(VARCHAR, 'mysql') -def _compile_varchar_mysql(element, compiler, **kw): - """MySQL hack to avoid the "VARCHAR requires a length" error.""" - if not element.length or element.length == 'max': - return "TEXT" - else: - return compiler.visit_VARCHAR(element, **kw) - -#------------------------------------------------------------------------------ - -class _SQLitePatch (PoolListener): - """ - Used internally by L{BaseDAO}. - - After connecting to an SQLite database, ensure that the foreign keys - support is enabled. If not, abort the connection. - - @see: U{http://sqlite.org/foreignkeys.html} - """ - def connect(dbapi_connection, connection_record): - """ - Called once by SQLAlchemy for each new SQLite DB-API connection. - - Here is where we issue some PRAGMA statements to configure how we're - going to access the SQLite database. - - @param dbapi_connection: - A newly connected raw SQLite DB-API connection. - - @param connection_record: - Unused by this method. - """ - try: - cursor = dbapi_connection.cursor() - try: - cursor.execute("PRAGMA foreign_keys = ON;") - cursor.execute("PRAGMA foreign_keys;") - if cursor.fetchone()[0] != 1: - raise Exception() - finally: - cursor.close() - except Exception: - dbapi_connection.close() - raise sqlite3.Error() - -#------------------------------------------------------------------------------ - -class BaseDTO (object): - """ - Customized declarative base for SQLAlchemy. - """ - - __table_args__ = { - - # Don't use MyISAM in MySQL. It doesn't support ON DELETE CASCADE. - 'mysql_engine': 'InnoDB', - - # Don't use BlitzDB in Drizzle. It doesn't support foreign keys. - 'drizzle_engine': 'InnoDB', - - # Collate to UTF-8. - 'mysql_charset': 'utf8', - - } - -BaseDTO = declarative_base(cls = BaseDTO) - -#------------------------------------------------------------------------------ - -# TODO: if using mssql, check it's at least SQL Server 2005 -# (LIMIT and OFFSET support is required). -# TODO: if using mysql, check it's at least MySQL 5.0.3 -# (nested transactions are required). -# TODO: maybe in mysql check the tables are not myisam? -# TODO: maybe create the database if it doesn't exist? -# TODO: maybe add a method to compact the database? -# http://stackoverflow.com/questions/1875885 -# http://www.sqlite.org/lang_vacuum.html -# http://dev.mysql.com/doc/refman/5.1/en/optimize-table.html -# http://msdn.microsoft.com/en-us/library/ms174459(v=sql.90).aspx - -class BaseDAO (object): - """ - Data Access Object base class. - - @type _url: sqlalchemy.url.URL - @ivar _url: Database connection URL. - - @type _dialect: str - @ivar _dialect: SQL dialect currently being used. - - @type _driver: str - @ivar _driver: Name of the database driver currently being used. - To get the actual Python module use L{_url}.get_driver() instead. - - @type _session: sqlalchemy.orm.Session - @ivar _session: Database session object. - - @type _new_session: class - @cvar _new_session: Custom configured Session class used to create the - L{_session} instance variable. - - @type _echo: bool - @cvar _echo: Set to C{True} to print all SQL queries to standard output. 
- """ - - _echo = False - - _new_session = sessionmaker(autoflush = True, - autocommit = True, - expire_on_commit = True, - weak_identity_map = True) - - def __init__(self, url, creator = None): - """ - Connect to the database using the given connection URL. - - The current implementation uses SQLAlchemy and so it will support - whatever database said module supports. - - @type url: str - @param url: - URL that specifies the database to connect to. - - Some examples: - - Opening an SQLite file: - C{dao = CrashDAO("sqlite:///C:\\some\\path\\database.sqlite")} - - Connecting to a locally installed SQL Express database: - C{dao = CrashDAO("mssql://.\\SQLEXPRESS/Crashes?trusted_connection=yes")} - - Connecting to a MySQL database running locally, using the - C{oursql} library, authenticating as the "winappdbg" user with - no password: - C{dao = CrashDAO("mysql+oursql://winappdbg@localhost/Crashes")} - - Connecting to a Postgres database running locally, - authenticating with user and password: - C{dao = CrashDAO("postgresql://winappdbg:winappdbg@localhost/Crashes")} - - For more information see the C{SQLAlchemy} documentation online: - U{http://docs.sqlalchemy.org/en/latest/core/engines.html} - - Note that in all dialects except for SQLite the database - must already exist. The tables schema, however, is created - automatically when connecting for the first time. - - To create the database in MSSQL, you can use the - U{SQLCMD<http://msdn.microsoft.com/en-us/library/ms180944.aspx>} - command:: - sqlcmd -Q "CREATE DATABASE Crashes" - - In MySQL you can use something like the following:: - mysql -u root -e "CREATE DATABASE Crashes;" - - And in Postgres:: - createdb Crashes -h localhost -U winappdbg -p winappdbg -O winappdbg - - Some small changes to the schema may be tolerated (for example, - increasing the maximum length of string columns, or adding new - columns with default values). Of course, it's best to test it - first before making changes in a live database. This all depends - very much on the SQLAlchemy version you're using, but it's best - to use the latest version always. - - @type creator: callable - @param creator: (Optional) Callback function that creates the SQL - database connection. - - Normally it's not necessary to use this argument. However in some - odd cases you may need to customize the database connection. - """ - - # Parse the connection URL. - parsed_url = URL(url) - schema = parsed_url.drivername - if '+' in schema: - dialect, driver = schema.split('+') - else: - dialect, driver = schema, 'base' - dialect = dialect.strip().lower() - driver = driver.strip() - - # Prepare the database engine arguments. - arguments = {'echo' : self._echo} - if dialect == 'sqlite': - arguments['module'] = sqlite3.dbapi2 - arguments['listeners'] = [_SQLitePatch()] - if creator is not None: - arguments['creator'] = creator - - # Load the database engine. - engine = create_engine(url, **arguments) - - # Create a new session. - session = self._new_session(bind = engine) - - # Create the required tables if they don't exist. - BaseDTO.metadata.create_all(engine) - # TODO: create a dialect specific index on the "signature" column. - - # Set the instance properties. - self._url = parsed_url - self._driver = driver - self._dialect = dialect - self._session = session - - def _transactional(self, method, *argv, **argd): - """ - Begins a transaction and calls the given DAO method. - - If the method executes successfully the transaction is commited. 
- - If the method fails, the transaction is rolled back. - - @type method: callable - @param method: Bound method of this class or one of its subclasses. - The first argument will always be C{self}. - - @return: The return value of the method call. - - @raise Exception: Any exception raised by the method. - """ - self._session.begin(subtransactions = True) - try: - result = method(self, *argv, **argd) - self._session.commit() - return result - except: - self._session.rollback() - raise - -#------------------------------------------------------------------------------ - -@decorator -def Transactional(fn, self, *argv, **argd): - """ - Decorator that wraps DAO methods to handle transactions automatically. - - It may only work with subclasses of L{BaseDAO}. - """ - return self._transactional(fn, *argv, **argd) - -#============================================================================== - -# Generates all possible memory access flags. -def _gen_valid_access_flags(): - f = [] - for a1 in ("---", "R--", "RW-", "RC-", "--X", "R-X", "RWX", "RCX", "???"): - for a2 in ("G", "-"): - for a3 in ("N", "-"): - for a4 in ("W", "-"): - f.append("%s %s%s%s" % (a1, a2, a3, a4)) - return tuple(f) -_valid_access_flags = _gen_valid_access_flags() - -# Enumerated types for the memory table. -n_MEM_ACCESS_ENUM = {"name" : "MEM_ACCESS_ENUM"} -n_MEM_ALLOC_ACCESS_ENUM = {"name" : "MEM_ALLOC_ACCESS_ENUM"} -MEM_ACCESS_ENUM = Enum(*_valid_access_flags, - **n_MEM_ACCESS_ENUM) -MEM_ALLOC_ACCESS_ENUM = Enum(*_valid_access_flags, - **n_MEM_ALLOC_ACCESS_ENUM) -MEM_STATE_ENUM = Enum("Reserved", "Commited", "Free", "Unknown", - name = "MEM_STATE_ENUM") -MEM_TYPE_ENUM = Enum("Image", "Mapped", "Private", "Unknown", - name = "MEM_TYPE_ENUM") - -# Cleanup the namespace. -del _gen_valid_access_flags -del _valid_access_flags -del n_MEM_ACCESS_ENUM -del n_MEM_ALLOC_ACCESS_ENUM - -#------------------------------------------------------------------------------ - -class MemoryDTO (BaseDTO): - """ - Database mapping for memory dumps. - """ - - # Declare the table mapping. - __tablename__ = 'memory' - id = Column(Integer, Sequence(__tablename__ + '_seq'), - primary_key = True, autoincrement = True) - crash_id = Column(Integer, ForeignKey('crashes.id', - ondelete = 'CASCADE', - onupdate = 'CASCADE'), - nullable = False) - address = Column(BigInteger, nullable = False, index = True) - size = Column(BigInteger, nullable = False) - state = Column(MEM_STATE_ENUM, nullable = False) - access = Column(MEM_ACCESS_ENUM) - type = Column(MEM_TYPE_ENUM) - alloc_base = Column(BigInteger) - alloc_access = Column(MEM_ALLOC_ACCESS_ENUM) - filename = Column(String) - content = deferred(Column(LargeBinary)) - - def __init__(self, crash_id, mbi): - """ - Process a L{win32.MemoryBasicInformation} object for database storage. - """ - - # Crash ID. - self.crash_id = crash_id - - # Address. - self.address = mbi.BaseAddress - - # Size. - self.size = mbi.RegionSize - - # State (free or allocated). - if mbi.State == win32.MEM_RESERVE: - self.state = "Reserved" - elif mbi.State == win32.MEM_COMMIT: - self.state = "Commited" - elif mbi.State == win32.MEM_FREE: - self.state = "Free" - else: - self.state = "Unknown" - - # Page protection bits (R/W/X/G). - if mbi.State != win32.MEM_COMMIT: - self.access = None - else: - self.access = self._to_access(mbi.Protect) - - # Type (file mapping, executable image, or private memory). 
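- # The win32 MEM_IMAGE / MEM_MAPPED / MEM_PRIVATE constants are mapped onto the
- # MEM_TYPE_ENUM strings defined above; a type of 0 (a free region) is stored as NULL.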
- if mbi.Type == win32.MEM_IMAGE: - self.type = "Image" - elif mbi.Type == win32.MEM_MAPPED: - self.type = "Mapped" - elif mbi.Type == win32.MEM_PRIVATE: - self.type = "Private" - elif mbi.Type == 0: - self.type = None - else: - self.type = "Unknown" - - # Allocation info. - self.alloc_base = mbi.AllocationBase - if not mbi.AllocationProtect: - self.alloc_access = None - else: - self.alloc_access = self._to_access(mbi.AllocationProtect) - - # Filename (for memory mappings). - try: - self.filename = mbi.filename - except AttributeError: - self.filename = None - - # Memory contents. - try: - self.content = mbi.content - except AttributeError: - self.content = None - - def _to_access(self, protect): - if protect & win32.PAGE_NOACCESS: - access = "--- " - elif protect & win32.PAGE_READONLY: - access = "R-- " - elif protect & win32.PAGE_READWRITE: - access = "RW- " - elif protect & win32.PAGE_WRITECOPY: - access = "RC- " - elif protect & win32.PAGE_EXECUTE: - access = "--X " - elif protect & win32.PAGE_EXECUTE_READ: - access = "R-X " - elif protect & win32.PAGE_EXECUTE_READWRITE: - access = "RWX " - elif protect & win32.PAGE_EXECUTE_WRITECOPY: - access = "RCX " - else: - access = "??? " - if protect & win32.PAGE_GUARD: - access += "G" - else: - access += "-" - if protect & win32.PAGE_NOCACHE: - access += "N" - else: - access += "-" - if protect & win32.PAGE_WRITECOMBINE: - access += "W" - else: - access += "-" - return access - - def toMBI(self, getMemoryDump = False): - """ - Returns a L{win32.MemoryBasicInformation} object using the data - retrieved from the database. - - @type getMemoryDump: bool - @param getMemoryDump: (Optional) If C{True} retrieve the memory dump. - Defaults to C{False} since this may be a costly operation. - - @rtype: L{win32.MemoryBasicInformation} - @return: Memory block information. 
- """ - mbi = win32.MemoryBasicInformation() - mbi.BaseAddress = self.address - mbi.RegionSize = self.size - mbi.State = self._parse_state(self.state) - mbi.Protect = self._parse_access(self.access) - mbi.Type = self._parse_type(self.type) - if self.alloc_base is not None: - mbi.AllocationBase = self.alloc_base - else: - mbi.AllocationBase = mbi.BaseAddress - if self.alloc_access is not None: - mbi.AllocationProtect = self._parse_access(self.alloc_access) - else: - mbi.AllocationProtect = mbi.Protect - if self.filename is not None: - mbi.filename = self.filename - if getMemoryDump and self.content is not None: - mbi.content = self.content - return mbi - - @staticmethod - def _parse_state(state): - if state: - if state == "Reserved": - return win32.MEM_RESERVE - if state == "Commited": - return win32.MEM_COMMIT - if state == "Free": - return win32.MEM_FREE - return 0 - - @staticmethod - def _parse_type(type): - if type: - if type == "Image": - return win32.MEM_IMAGE - if type == "Mapped": - return win32.MEM_MAPPED - if type == "Private": - return win32.MEM_PRIVATE - return -1 - return 0 - - @staticmethod - def _parse_access(access): - if not access: - return 0 - perm = access[:3] - if perm == "R--": - protect = win32.PAGE_READONLY - elif perm == "RW-": - protect = win32.PAGE_READWRITE - elif perm == "RC-": - protect = win32.PAGE_WRITECOPY - elif perm == "--X": - protect = win32.PAGE_EXECUTE - elif perm == "R-X": - protect = win32.PAGE_EXECUTE_READ - elif perm == "RWX": - protect = win32.PAGE_EXECUTE_READWRITE - elif perm == "RCX": - protect = win32.PAGE_EXECUTE_WRITECOPY - else: - protect = win32.PAGE_NOACCESS - if access[5] == "G": - protect = protect | win32.PAGE_GUARD - if access[6] == "N": - protect = protect | win32.PAGE_NOCACHE - if access[7] == "W": - protect = protect | win32.PAGE_WRITECOMBINE - return protect - -#------------------------------------------------------------------------------ - -class CrashDTO (BaseDTO): - """ - Database mapping for crash dumps. - """ - - # Table name. - __tablename__ = "crashes" - - # Primary key. - id = Column(Integer, Sequence(__tablename__ + '_seq'), - primary_key = True, autoincrement = True) - - # Timestamp. - timestamp = Column(DateTime, nullable = False, index = True) - - # Exploitability test. - exploitable = Column(Integer, nullable = False) - exploitability_rule = Column(String(32), nullable = False) - exploitability_rating = Column(String(32), nullable = False) - exploitability_desc = Column(String, nullable = False) - - # Platform description. - os = Column(String(32), nullable = False) - arch = Column(String(16), nullable = False) - bits = Column(Integer, nullable = False) # Integer(4) is deprecated :( - - # Event description. - event = Column(String, nullable = False) - pid = Column(Integer, nullable = False) - tid = Column(Integer, nullable = False) - pc = Column(BigInteger, nullable = False) - sp = Column(BigInteger, nullable = False) - fp = Column(BigInteger, nullable = False) - pc_label = Column(String, nullable = False) - - # Exception description. - exception = Column(String(64)) - exception_text = Column(String(64)) - exception_address = Column(BigInteger) - exception_label = Column(String) - first_chance = Column(Boolean) - fault_type = Column(Integer) - fault_address = Column(BigInteger) - fault_label = Column(String) - fault_disasm = Column(String) - stack_trace = Column(String) - - # Environment description. - command_line = Column(String) - environment = Column(String) - - # Debug strings. 
- debug_string = Column(String) - - # Notes. - notes = Column(String) - - # Heuristic signature. - signature = Column(String, nullable = False) - - # Pickled Crash object, minus the memory dump. - data = deferred(Column(LargeBinary, nullable = False)) - - def __init__(self, crash): - """ - @type crash: Crash - @param crash: L{Crash} object to store into the database. - """ - - # Timestamp and signature. - self.timestamp = datetime.datetime.fromtimestamp( crash.timeStamp ) - self.signature = pickle.dumps(crash.signature, protocol = 0) - - # Marshalled Crash object, minus the memory dump. - # This code is *not* thread safe! - memoryMap = crash.memoryMap - try: - crash.memoryMap = None - self.data = buffer( Marshaller.dumps(crash) ) - finally: - crash.memoryMap = memoryMap - - # Exploitability test. - self.exploitability_rating, \ - self.exploitability_rule, \ - self.exploitability_desc = crash.isExploitable() - - # Exploitability test as an integer result (for sorting). - self.exploitable = [ - "Not an exception", - "Not exploitable", - "Not likely exploitable", - "Unknown", - "Probably exploitable", - "Exploitable", - ].index(self.exploitability_rating) - - # Platform description. - self.os = crash.os - self.arch = crash.arch - self.bits = crash.bits - - # Event description. - self.event = crash.eventName - self.pid = crash.pid - self.tid = crash.tid - self.pc = crash.pc - self.sp = crash.sp - self.fp = crash.fp - self.pc_label = crash.labelPC - - # Exception description. - self.exception = crash.exceptionName - self.exception_text = crash.exceptionDescription - self.exception_address = crash.exceptionAddress - self.exception_label = crash.exceptionLabel - self.first_chance = crash.firstChance - self.fault_type = crash.faultType - self.fault_address = crash.faultAddress - self.fault_label = crash.faultLabel - self.fault_disasm = CrashDump.dump_code( crash.faultDisasm, - crash.pc ) - self.stack_trace = CrashDump.dump_stack_trace_with_labels( - crash.stackTracePretty ) - - # Command line. - self.command_line = crash.commandLine - - # Environment. - if crash.environment: - envList = crash.environment.items() - envList.sort() - environment = '' - for envKey, envVal in envList: - # Must concatenate here instead of using a substitution, - # so strings can be automatically promoted to Unicode. - environment += envKey + '=' + envVal + '\n' - if environment: - self.environment = environment - - # Debug string. - self.debug_string = crash.debugString - - # Notes. - self.notes = crash.notesReport() - - def toCrash(self, getMemoryDump = False): - """ - Returns a L{Crash} object using the data retrieved from the database. - - @type getMemoryDump: bool - @param getMemoryDump: If C{True} retrieve the memory dump. - Defaults to C{False} since this may be a costly operation. - - @rtype: L{Crash} - @return: Crash object. - """ - crash = Marshaller.loads(str(self.data)) - if not isinstance(crash, Crash): - raise TypeError( - "Expected Crash instance, got %s instead" % type(crash)) - crash._rowid = self.id - if not crash.memoryMap: - memory = getattr(self, "memory", []) - if memory: - crash.memoryMap = [dto.toMBI(getMemoryDump) for dto in memory] - return crash - -#============================================================================== - -# TODO: add a method to modify already stored crash dumps. - -class CrashDAO (BaseDAO): - """ - Data Access Object to read, write and search for L{Crash} objects in a - database. 
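- 
-     Example (a minimal usage sketch; it assumes an SQLite connection URL and a
-     previously captured L{Crash} object, see L{BaseDAO.__init__} for other backends)::
-         dao = CrashDAO("sqlite:///crashes.sqlite")
-         dao.add(crash, allow_duplicates = False)
-         recent = dao.find(order = -1, limit = 10)
-         total = dao.count()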
- """ - - @Transactional - def add(self, crash, allow_duplicates = True): - """ - Add a new crash dump to the database, optionally filtering them by - signature to avoid duplicates. - - @type crash: L{Crash} - @param crash: Crash object. - - @type allow_duplicates: bool - @param allow_duplicates: (Optional) - C{True} to always add the new crash dump. - C{False} to only add the crash dump if no other crash with the - same signature is found in the database. - - Sometimes, your fuzzer turns out to be I{too} good. Then you find - youself browsing through gigabytes of crash dumps, only to find - a handful of actual bugs in them. This simple heuristic filter - saves you the trouble by discarding crashes that seem to be similar - to another one you've already found. - """ - - # Filter out duplicated crashes, if requested. - if not allow_duplicates: - signature = pickle.dumps(crash.signature, protocol = 0) - if self._session.query(CrashDTO.id) \ - .filter_by(signature = signature) \ - .count() > 0: - return - - # Fill out a new row for the crashes table. - crash_id = self.__add_crash(crash) - - # Fill out new rows for the memory dump. - self.__add_memory(crash_id, crash.memoryMap) - - # On success set the row ID for the Crash object. - # WARNING: In nested calls, make sure to delete - # this property before a session rollback! - crash._rowid = crash_id - - # Store the Crash object into the crashes table. - def __add_crash(self, crash): - session = self._session - r_crash = None - try: - - # Fill out a new row for the crashes table. - r_crash = CrashDTO(crash) - session.add(r_crash) - - # Flush and get the new row ID. - session.flush() - crash_id = r_crash.id - - finally: - try: - - # Make the ORM forget the CrashDTO object. - if r_crash is not None: - session.expire(r_crash) - - finally: - - # Delete the last reference to the CrashDTO - # object, so the Python garbage collector claims it. - del r_crash - - # Return the row ID. - return crash_id - - # Store the memory dump into the memory table. - def __add_memory(self, crash_id, memoryMap): - session = self._session - if memoryMap: - for mbi in memoryMap: - r_mem = MemoryDTO(crash_id, mbi) - session.add(r_mem) - session.flush() - - @Transactional - def find(self, - signature = None, order = 0, - since = None, until = None, - offset = None, limit = None): - """ - Retrieve all crash dumps in the database, optionally filtering them by - signature and timestamp, and/or sorting them by timestamp. - - Results can be paged to avoid consuming too much memory if the database - is large. - - @see: L{find_by_example} - - @type signature: object - @param signature: (Optional) Return only through crashes matching - this signature. See L{Crash.signature} for more details. - - @type order: int - @param order: (Optional) Sort by timestamp. - If C{== 0}, results are not sorted. - If C{> 0}, results are sorted from older to newer. - If C{< 0}, results are sorted from newer to older. - - @type since: datetime - @param since: (Optional) Return only the crashes after and - including this date and time. - - @type until: datetime - @param until: (Optional) Return only the crashes before this date - and time, not including it. - - @type offset: int - @param offset: (Optional) Skip the first I{offset} results. - - @type limit: int - @param limit: (Optional) Return at most I{limit} results. - - @rtype: list(L{Crash}) - @return: List of Crash objects. - """ - - # Validate the parameters. 
- if since and until and since > until: - warnings.warn("CrashDAO.find() got the 'since' and 'until'" - " arguments reversed, corrected automatically.") - since, until = until, since - if limit is not None and not limit: - warnings.warn("CrashDAO.find() was set a limit of 0 results," - " returning without executing a query.") - return [] - - # Build the SQL query. - query = self._session.query(CrashDTO) - if signature is not None: - sig_pickled = pickle.dumps(signature, protocol = 0) - query = query.filter(CrashDTO.signature == sig_pickled) - if since: - query = query.filter(CrashDTO.timestamp >= since) - if until: - query = query.filter(CrashDTO.timestamp < until) - if order: - if order > 0: - query = query.order_by(asc(CrashDTO.timestamp)) - else: - query = query.order_by(desc(CrashDTO.timestamp)) - else: - # Default ordering is by row ID, to get consistent results. - # Also some database engines require ordering when using offsets. - query = query.order_by(asc(CrashDTO.id)) - if offset: - query = query.offset(offset) - if limit: - query = query.limit(limit) - - # Execute the SQL query and convert the results. - try: - return [dto.toCrash() for dto in query.all()] - except NoResultFound: - return [] - - @Transactional - def find_by_example(self, crash, offset = None, limit = None): - """ - Find all crash dumps that have common properties with the crash dump - provided. - - Results can be paged to avoid consuming too much memory if the database - is large. - - @see: L{find} - - @type crash: L{Crash} - @param crash: Crash object to compare with. Fields set to C{None} are - ignored, all other fields but the signature are used in the - comparison. - - To search for signature instead use the L{find} method. - - @type offset: int - @param offset: (Optional) Skip the first I{offset} results. - - @type limit: int - @param limit: (Optional) Return at most I{limit} results. - - @rtype: list(L{Crash}) - @return: List of similar crash dumps found. - """ - - # Validate the parameters. - if limit is not None and not limit: - warnings.warn("CrashDAO.find_by_example() was set a limit of 0" - " results, returning without executing a query.") - return [] - - # Build the query. - query = self._session.query(CrashDTO) - - # Order by row ID to get consistent results. - # Also some database engines require ordering when using offsets. - query = query.asc(CrashDTO.id) - - # Build a CrashDTO from the Crash object. - dto = CrashDTO(crash) - - # Filter all the fields in the crashes table that are present in the - # CrashDTO object and not set to None, except for the row ID. - for name, column in compat.iteritems(CrashDTO.__dict__): - if not name.startswith('__') and name not in ('id', - 'signature', - 'data'): - if isinstance(column, Column): - value = getattr(dto, name, None) - if value is not None: - query = query.filter(column == value) - - # Page the query. - if offset: - query = query.offset(offset) - if limit: - query = query.limit(limit) - - # Execute the SQL query and convert the results. - try: - return [dto.toCrash() for dto in query.all()] - except NoResultFound: - return [] - - @Transactional - def count(self, signature = None): - """ - Counts how many crash dumps have been stored in this database. - Optionally filters the count by heuristic signature. - - @type signature: object - @param signature: (Optional) Count only the crashes that match - this signature. See L{Crash.signature} for more details. - - @rtype: int - @return: Count of crash dumps stored in this database. 
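- 
-         Example (sketch)::
-             total = dao.count()
-             similar = dao.count(signature = crash.signature)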
- """ - query = self._session.query(CrashDTO.id) - if signature: - sig_pickled = pickle.dumps(signature, protocol = 0) - query = query.filter_by(signature = sig_pickled) - return query.count() - - @Transactional - def delete(self, crash): - """ - Remove the given crash dump from the database. - - @type crash: L{Crash} - @param crash: Crash dump to remove. - """ - query = self._session.query(CrashDTO).filter_by(id = crash._rowid) - query.delete(synchronize_session = False) - del crash._rowid diff --git a/spaces/Supedsa/rvc-models/lib/infer_pack/modules/F0Predictor/__init__.py b/spaces/Supedsa/rvc-models/lib/infer_pack/modules/F0Predictor/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/ops/assign_score_withk.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/ops/assign_score_withk.py deleted file mode 100644 index 4906adaa2cffd1b46912fbe7d4f87ef2f9fa0012..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/ops/assign_score_withk.py +++ /dev/null @@ -1,123 +0,0 @@ -from torch.autograd import Function - -from ..utils import ext_loader - -ext_module = ext_loader.load_ext( - '_ext', ['assign_score_withk_forward', 'assign_score_withk_backward']) - - -class AssignScoreWithK(Function): - r"""Perform weighted sum to generate output features according to scores. - Modified from `PAConv <https://github.com/CVMI-Lab/PAConv/tree/main/ - scene_seg/lib/paconv_lib/src/gpu>`_. - - This is a memory-efficient CUDA implementation of assign_scores operation, - which first transform all point features with weight bank, then assemble - neighbor features with ``knn_idx`` and perform weighted sum of ``scores``. - - See the `paper <https://arxiv.org/pdf/2103.14635.pdf>`_ appendix Sec. D for - more detailed descriptions. - - Note: - This implementation assumes using ``neighbor`` kernel input, which is - (point_features - center_features, point_features). - See https://github.com/CVMI-Lab/PAConv/blob/main/scene_seg/model/ - pointnet2/paconv.py#L128 for more details. - """ - - @staticmethod - def forward(ctx, - scores, - point_features, - center_features, - knn_idx, - aggregate='sum'): - """ - Args: - scores (torch.Tensor): (B, npoint, K, M), predicted scores to - aggregate weight matrices in the weight bank. - ``npoint`` is the number of sampled centers. - ``K`` is the number of queried neighbors. - ``M`` is the number of weight matrices in the weight bank. - point_features (torch.Tensor): (B, N, M, out_dim) - Pre-computed point features to be aggregated. - center_features (torch.Tensor): (B, N, M, out_dim) - Pre-computed center features to be aggregated. - knn_idx (torch.Tensor): (B, npoint, K), index of sampled kNN. - We assume the first idx in each row is the idx of the center. - aggregate (str, optional): Aggregation method. - Can be 'sum', 'avg' or 'max'. Defaults: 'sum'. - - Returns: - torch.Tensor: (B, out_dim, npoint, K), the aggregated features. 
- """ - agg = {'sum': 0, 'avg': 1, 'max': 2} - - B, N, M, out_dim = point_features.size() - _, npoint, K, _ = scores.size() - - output = point_features.new_zeros((B, out_dim, npoint, K)) - ext_module.assign_score_withk_forward( - point_features.contiguous(), - center_features.contiguous(), - scores.contiguous(), - knn_idx.contiguous(), - output, - B=B, - N0=N, - N1=npoint, - M=M, - K=K, - O=out_dim, - aggregate=agg[aggregate]) - - ctx.save_for_backward(output, point_features, center_features, scores, - knn_idx) - ctx.agg = agg[aggregate] - - return output - - @staticmethod - def backward(ctx, grad_out): - """ - Args: - grad_out (torch.Tensor): (B, out_dim, npoint, K) - - Returns: - grad_scores (torch.Tensor): (B, npoint, K, M) - grad_point_features (torch.Tensor): (B, N, M, out_dim) - grad_center_features (torch.Tensor): (B, N, M, out_dim) - """ - _, point_features, center_features, scores, knn_idx = ctx.saved_tensors - - agg = ctx.agg - - B, N, M, out_dim = point_features.size() - _, npoint, K, _ = scores.size() - - grad_point_features = point_features.new_zeros(point_features.shape) - grad_center_features = center_features.new_zeros(center_features.shape) - grad_scores = scores.new_zeros(scores.shape) - - ext_module.assign_score_withk_backward( - grad_out.contiguous(), - point_features.contiguous(), - center_features.contiguous(), - scores.contiguous(), - knn_idx.contiguous(), - grad_point_features, - grad_center_features, - grad_scores, - B=B, - N0=N, - N1=npoint, - M=M, - K=K, - O=out_dim, - aggregate=agg) - - return grad_scores, grad_point_features, \ - grad_center_features, None, None - - -assign_score_withk = AssignScoreWithK.apply diff --git a/spaces/TencentARC/VLog/models/grit_src/image_dense_captions.py b/spaces/TencentARC/VLog/models/grit_src/image_dense_captions.py deleted file mode 100644 index a37196467599b5b13eef3c52b0b54a07ae50fb79..0000000000000000000000000000000000000000 --- a/spaces/TencentARC/VLog/models/grit_src/image_dense_captions.py +++ /dev/null @@ -1,69 +0,0 @@ -import argparse -import multiprocessing as mp -import os -import time -import cv2 -import tqdm -import sys - -from detectron2.config import get_cfg -from detectron2.data.detection_utils import read_image -from detectron2.utils.logger import setup_logger - -sys.path.insert(0, 'models/grit_src/third_party/CenterNet2/projects/CenterNet2/') -from centernet.config import add_centernet_config -from models.grit_src.grit.config import add_grit_config - -from models.grit_src.grit.predictor import VisualizationDemo -import json - - -# constants -WINDOW_NAME = "GRiT" - - -def dense_pred_to_caption(predictions): - boxes = predictions["instances"].pred_boxes if predictions["instances"].has("pred_boxes") else None - object_description = predictions["instances"].pred_object_descriptions.data - new_caption = "" - for i in range(len(object_description)): - if i != len(object_description) - 1: - new_caption += object_description[i] + ", " - else: - new_caption += object_description[i] - # new_caption += (object_description[i] + ": " + str([int(a) for a in boxes[i].tensor.cpu().detach().numpy()[0]])) + "; " - return new_caption - -def setup_cfg(args): - cfg = get_cfg() - if args["cpu"]: - cfg.MODEL.DEVICE="cpu" - add_centernet_config(cfg) - add_grit_config(cfg) - cfg.merge_from_file(args["config_file"]) - cfg.merge_from_list(args["opts"]) - # Set score_threshold for builtin models - cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args["confidence_threshold"] - cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = 
args["confidence_threshold"] - if args["test_task"]: - cfg.MODEL.TEST_TASK = args["test_task"] - cfg.MODEL.BEAM_SIZE = 1 - cfg.MODEL.ROI_HEADS.SOFT_NMS_ENABLED = False - cfg.USE_ACT_CHECKPOINT = False - cfg.freeze() - return cfg - - -def get_parser(device): - arg_dict = {'config_file': "models/grit_src/configs/GRiT_B_DenseCap_ObjectDet.yaml", 'cpu': False, 'confidence_threshold': 0.5, 'test_task': 'DenseCap', 'opts': ["MODEL.WEIGHTS", "checkpoints/grit_b_densecap_objectdet.pth"]} - if device == "cpu": - arg_dict["cpu"] = True - return arg_dict - -def image_caption_api(image_src, device): - args2 = get_parser(device) - cfg = setup_cfg(args2) - demo = VisualizationDemo(cfg) - predictions, visualized_output = demo.run_on_image(image_src) - new_caption = dense_pred_to_caption(predictions) - return new_caption diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/docs/notes/changelog.md b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/docs/notes/changelog.md deleted file mode 100644 index 000e9f8898dba53f54121a5325ba5165e45ddea2..0000000000000000000000000000000000000000 --- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/docs/notes/changelog.md +++ /dev/null @@ -1,48 +0,0 @@ -# Change Log and Backward Compatibility - -### Releases -See release logs at -[https://github.com/facebookresearch/detectron2/releases](https://github.com/facebookresearch/detectron2/releases) -for new updates. - -### Backward Compatibility - -Due to the research nature of what the library does, there might be backward incompatible changes. -But we try to reduce users' disruption by the following ways: -* APIs listed in [API documentation](https://detectron2.readthedocs.io/modules/index.html), including - function/class names, their arguments, and documented class attributes, are considered *stable* unless - otherwise noted in the documentation. - They are less likely to be broken, but if needed, will trigger a deprecation warning for a reasonable period - before getting broken, and will be documented in release logs. -* Others functions/classses/attributes are considered internal, and are more likely to change. - However, we're aware that some of them may be already used by other projects, and in particular we may - use them for convenience among projects under `detectron2/projects`. - For such APIs, we may treat them as stable APIs and also apply the above strategies. - They may be promoted to stable when we're ready. -* Projects under "detectron2/projects" or imported with "detectron2.projects" are research projects - and are all considered experimental. -* Classes/functions that contain the word "default" or are explicitly documented to produce - "default behavior" may change their behaviors when new features are added. - -Despite of the possible breakage, if a third-party project would like to keep up with the latest updates -in detectron2, using it as a library will still be less disruptive than forking, because -the frequency and scope of API changes will be much smaller than code changes. - -To see such changes, search for "incompatible changes" in [release logs](https://github.com/facebookresearch/detectron2/releases). - -### Config Version Change Log - -Detectron2's config version has not been changed since open source. -There is no need for an open source user to worry about this. - -* v1: Rename `RPN_HEAD.NAME` to `RPN.HEAD_NAME`. -* v2: A batch of rename of many configurations before release. 
- -### Silent Regressions in Historical Versions: - -We list a few silent regressions, since they may silently produce incorrect results and will be hard to debug. - -* 04/01/2020 - 05/11/2020: Bad accuracy if `TRAIN_ON_PRED_BOXES` is set to True. -* 03/30/2020 - 04/01/2020: ResNets are not correctly built. -* 12/19/2019 - 12/26/2019: Using aspect ratio grouping causes a drop in accuracy. -* - 11/9/2019: Test time augmentation does not predict the last category. diff --git a/spaces/Thaweewat/ControlNet-Architecture/cldm/cldm.py b/spaces/Thaweewat/ControlNet-Architecture/cldm/cldm.py deleted file mode 100644 index 11e791c34f7b4af5d795392f53a0894793bfc792..0000000000000000000000000000000000000000 --- a/spaces/Thaweewat/ControlNet-Architecture/cldm/cldm.py +++ /dev/null @@ -1,417 +0,0 @@ -import einops -import torch -import torch as th -import torch.nn as nn - -from ldm.modules.diffusionmodules.util import ( - conv_nd, - linear, - zero_module, - timestep_embedding, -) - -from einops import rearrange, repeat -from torchvision.utils import make_grid -from ldm.modules.attention import SpatialTransformer -from ldm.modules.diffusionmodules.openaimodel import UNetModel, TimestepEmbedSequential, ResBlock, Downsample, AttentionBlock -from ldm.models.diffusion.ddpm import LatentDiffusion -from ldm.util import log_txt_as_img, exists, instantiate_from_config -from ldm.models.diffusion.ddim import DDIMSampler - - -class ControlledUnetModel(UNetModel): - def forward(self, x, timesteps=None, context=None, control=None, only_mid_control=False, **kwargs): - hs = [] - with torch.no_grad(): - t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) - emb = self.time_embed(t_emb) - h = x.type(self.dtype) - for module in self.input_blocks: - h = module(h, emb, context) - hs.append(h) - h = self.middle_block(h, emb, context) - - h += control.pop() - - for i, module in enumerate(self.output_blocks): - if only_mid_control: - h = torch.cat([h, hs.pop()], dim=1) - else: - h = torch.cat([h, hs.pop() + control.pop()], dim=1) - h = module(h, emb, context) - - h = h.type(x.dtype) - return self.out(h) - - -class ControlNet(nn.Module): - def __init__( - self, - image_size, - in_channels, - model_channels, - hint_channels, - num_res_blocks, - attention_resolutions, - dropout=0, - channel_mult=(1, 2, 4, 8), - conv_resample=True, - dims=2, - use_checkpoint=False, - use_fp16=False, - num_heads=-1, - num_head_channels=-1, - num_heads_upsample=-1, - use_scale_shift_norm=False, - resblock_updown=False, - use_new_attention_order=False, - use_spatial_transformer=False, # custom transformer support - transformer_depth=1, # custom transformer support - context_dim=None, # custom transformer support - n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model - legacy=True, - disable_self_attentions=None, - num_attention_blocks=None, - disable_middle_self_attn=False, - use_linear_in_transformer=False, - ): - super().__init__() - if use_spatial_transformer: - assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' - - if context_dim is not None: - assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' 
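- # context_dim may arrive as an OmegaConf ListConfig; the import below is used to
- # normalize it to a plain Python list before building the transformer blocks.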
- from omegaconf.listconfig import ListConfig - if type(context_dim) == ListConfig: - context_dim = list(context_dim) - - if num_heads_upsample == -1: - num_heads_upsample = num_heads - - if num_heads == -1: - assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' - - if num_head_channels == -1: - assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' - - self.dims = dims - self.image_size = image_size - self.in_channels = in_channels - self.model_channels = model_channels - if isinstance(num_res_blocks, int): - self.num_res_blocks = len(channel_mult) * [num_res_blocks] - else: - if len(num_res_blocks) != len(channel_mult): - raise ValueError("provide num_res_blocks either as an int (globally constant) or " - "as a list/tuple (per-level) with the same length as channel_mult") - self.num_res_blocks = num_res_blocks - if disable_self_attentions is not None: - # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not - assert len(disable_self_attentions) == len(channel_mult) - if num_attention_blocks is not None: - assert len(num_attention_blocks) == len(self.num_res_blocks) - assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks)))) - print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. " - f"This option has LESS priority than attention_resolutions {attention_resolutions}, " - f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, " - f"attention will still not be set.") - - self.attention_resolutions = attention_resolutions - self.dropout = dropout - self.channel_mult = channel_mult - self.conv_resample = conv_resample - self.use_checkpoint = use_checkpoint - self.dtype = th.float16 if use_fp16 else th.float32 - self.num_heads = num_heads - self.num_head_channels = num_head_channels - self.num_heads_upsample = num_heads_upsample - self.predict_codebook_ids = n_embed is not None - - time_embed_dim = model_channels * 4 - self.time_embed = nn.Sequential( - linear(model_channels, time_embed_dim), - nn.SiLU(), - linear(time_embed_dim, time_embed_dim), - ) - - self.input_blocks = nn.ModuleList( - [ - TimestepEmbedSequential( - conv_nd(dims, in_channels, model_channels, 3, padding=1) - ) - ] - ) - self.zero_convs = nn.ModuleList([self.make_zero_conv(model_channels)]) - - self.input_hint_block = TimestepEmbedSequential( - conv_nd(dims, hint_channels, 16, 3, padding=1), - nn.SiLU(), - conv_nd(dims, 16, 16, 3, padding=1), - nn.SiLU(), - conv_nd(dims, 16, 32, 3, padding=1, stride=2), - nn.SiLU(), - conv_nd(dims, 32, 32, 3, padding=1), - nn.SiLU(), - conv_nd(dims, 32, 96, 3, padding=1, stride=2), - nn.SiLU(), - conv_nd(dims, 96, 96, 3, padding=1), - nn.SiLU(), - conv_nd(dims, 96, 256, 3, padding=1, stride=2), - nn.SiLU(), - zero_module(conv_nd(dims, 256, model_channels, 3, padding=1)) - ) - - self._feature_size = model_channels - input_block_chans = [model_channels] - ch = model_channels - ds = 1 - for level, mult in enumerate(channel_mult): - for nr in range(self.num_res_blocks[level]): - layers = [ - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=mult * model_channels, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ) - ] - ch = mult * model_channels - if ds in attention_resolutions: - if num_head_channels == -1: - dim_head = ch // num_heads - else: - num_heads = ch // num_head_channels - dim_head = num_head_channels - if 
legacy: - #num_heads = 1 - dim_head = ch // num_heads if use_spatial_transformer else num_head_channels - if exists(disable_self_attentions): - disabled_sa = disable_self_attentions[level] - else: - disabled_sa = False - - if not exists(num_attention_blocks) or nr < num_attention_blocks[level]: - layers.append( - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads, - num_head_channels=dim_head, - use_new_attention_order=use_new_attention_order, - ) if not use_spatial_transformer else SpatialTransformer( - ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, - disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, - use_checkpoint=use_checkpoint - ) - ) - self.input_blocks.append(TimestepEmbedSequential(*layers)) - self.zero_convs.append(self.make_zero_conv(ch)) - self._feature_size += ch - input_block_chans.append(ch) - if level != len(channel_mult) - 1: - out_ch = ch - self.input_blocks.append( - TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=out_ch, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - down=True, - ) - if resblock_updown - else Downsample( - ch, conv_resample, dims=dims, out_channels=out_ch - ) - ) - ) - ch = out_ch - input_block_chans.append(ch) - self.zero_convs.append(self.make_zero_conv(ch)) - ds *= 2 - self._feature_size += ch - - if num_head_channels == -1: - dim_head = ch // num_heads - else: - num_heads = ch // num_head_channels - dim_head = num_head_channels - if legacy: - #num_heads = 1 - dim_head = ch // num_heads if use_spatial_transformer else num_head_channels - self.middle_block = TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim, - dropout, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ), - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads, - num_head_channels=dim_head, - use_new_attention_order=use_new_attention_order, - ) if not use_spatial_transformer else SpatialTransformer( # always uses a self-attn - ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, - disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer, - use_checkpoint=use_checkpoint - ), - ResBlock( - ch, - time_embed_dim, - dropout, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ), - ) - self.middle_block_out = self.make_zero_conv(ch) - self._feature_size += ch - - def make_zero_conv(self, channels): - return TimestepEmbedSequential(zero_module(conv_nd(self.dims, channels, channels, 1, padding=0))) - - def forward(self, x, hint, timesteps, context, **kwargs): - t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) - emb = self.time_embed(t_emb) - - guided_hint = self.input_hint_block(hint, emb, context) - - outs = [] - - h = x.type(self.dtype) - for module, zero_conv in zip(self.input_blocks, self.zero_convs): - if guided_hint is not None: - h = module(h, emb, context) - h += guided_hint - guided_hint = None - else: - h = module(h, emb, context) - outs.append(zero_conv(h, emb, context)) - - h = self.middle_block(h, emb, context) - outs.append(self.middle_block_out(h, emb, context)) - - return outs - - -class ControlLDM(LatentDiffusion): - - def __init__(self, control_stage_config, control_key, only_mid_control, *args, **kwargs): - super().__init__(*args, **kwargs) - self.control_model = instantiate_from_config(control_stage_config) - self.control_key = 
control_key - self.only_mid_control = only_mid_control - - @torch.no_grad() - def get_input(self, batch, k, bs=None, *args, **kwargs): - x, c = super().get_input(batch, self.first_stage_key, *args, **kwargs) - control = batch[self.control_key] - if bs is not None: - control = control[:bs] - control = control.to(self.device) - control = einops.rearrange(control, 'b h w c -> b c h w') - control = control.to(memory_format=torch.contiguous_format).float() - return x, dict(c_crossattn=[c], c_concat=[control]) - - def apply_model(self, x_noisy, t, cond, *args, **kwargs): - assert isinstance(cond, dict) - diffusion_model = self.model.diffusion_model - cond_txt = torch.cat(cond['c_crossattn'], 1) - cond_hint = torch.cat(cond['c_concat'], 1) - - control = self.control_model(x=x_noisy, hint=cond_hint, timesteps=t, context=cond_txt) - eps = diffusion_model(x=x_noisy, timesteps=t, context=cond_txt, control=control, only_mid_control=self.only_mid_control) - - return eps - - @torch.no_grad() - def get_unconditional_conditioning(self, N): - return self.get_learned_conditioning([""] * N) - - @torch.no_grad() - def log_images(self, batch, N=4, n_row=2, sample=False, ddim_steps=50, ddim_eta=0.0, return_keys=None, - quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, - plot_diffusion_rows=False, unconditional_guidance_scale=9.0, unconditional_guidance_label=None, - use_ema_scope=True, - **kwargs): - use_ddim = ddim_steps is not None - - log = dict() - z, c = self.get_input(batch, self.first_stage_key, bs=N) - c_cat, c = c["c_concat"][0][:N], c["c_crossattn"][0][:N] - N = min(z.shape[0], N) - n_row = min(z.shape[0], n_row) - log["reconstruction"] = self.decode_first_stage(z) - log["control"] = c_cat * 2.0 - 1.0 - log["conditioning"] = log_txt_as_img((512, 512), batch[self.cond_stage_key], size=16) - - if plot_diffusion_rows: - # get diffusion row - diffusion_row = list() - z_start = z[:n_row] - for t in range(self.num_timesteps): - if t % self.log_every_t == 0 or t == self.num_timesteps - 1: - t = repeat(torch.tensor([t]), '1 -> b', b=n_row) - t = t.to(self.device).long() - noise = torch.randn_like(z_start) - z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) - diffusion_row.append(self.decode_first_stage(z_noisy)) - - diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W - diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') - diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') - diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) - log["diffusion_row"] = diffusion_grid - - if sample: - # get denoise row - samples, z_denoise_row = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]}, - batch_size=N, ddim=use_ddim, - ddim_steps=ddim_steps, eta=ddim_eta) - x_samples = self.decode_first_stage(samples) - log["samples"] = x_samples - if plot_denoise_rows: - denoise_grid = self._get_denoise_row_from_list(z_denoise_row) - log["denoise_row"] = denoise_grid - - if unconditional_guidance_scale > 1.0: - uc_cross = self.get_unconditional_conditioning(N) - uc_cat = c_cat # torch.zeros_like(c_cat) - uc_full = {"c_concat": [uc_cat], "c_crossattn": [uc_cross]} - samples_cfg, _ = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]}, - batch_size=N, ddim=use_ddim, - ddim_steps=ddim_steps, eta=ddim_eta, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=uc_full, - ) - x_samples_cfg = self.decode_first_stage(samples_cfg) - 
log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg - - return log - - @torch.no_grad() - def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): - ddim_sampler = DDIMSampler(self) - b, c, h, w = cond["c_concat"][0].shape - shape = (self.channels, h // 8, w // 8) - samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) - return samples, intermediates - - def configure_optimizers(self): - lr = self.learning_rate - params = list(self.control_model.parameters()) - if not self.sd_locked: - params += list(self.model.diffusion_model.output_blocks.parameters()) - params += list(self.model.diffusion_model.out.parameters()) - opt = torch.optim.AdamW(params, lr=lr) - return opt diff --git a/spaces/Truym/rvc-pendu/infer_pack/commons.py b/spaces/Truym/rvc-pendu/infer_pack/commons.py deleted file mode 100644 index 54470986f37825b35d90d7efa7437d1c26b87215..0000000000000000000000000000000000000000 --- a/spaces/Truym/rvc-pendu/infer_pack/commons.py +++ /dev/null @@ -1,166 +0,0 @@ -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size * dilation - dilation) / 2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += ( - 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q) - ) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def slice_segments2(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / ( - num_timescales - 1 - ) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment - ) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def 
add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2, 3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1.0 / norm_type) - return total_norm diff --git a/spaces/Virus561/sdf/README.md b/spaces/Virus561/sdf/README.md deleted file mode 100644 index a1a3154098525dd5c2ef7d5e45754d85fa8135a1..0000000000000000000000000000000000000000 --- a/spaces/Virus561/sdf/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Sdf -emoji: 🐠 -colorFrom: indigo -colorTo: gray -sdk: gradio -sdk_version: 3.44.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/VoiceHero69/changer/webui/modules/implementations/rvc/infer_pack/modules/F0Predictor/__init__.py b/spaces/VoiceHero69/changer/webui/modules/implementations/rvc/infer_pack/modules/F0Predictor/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/WelcomeToTheClub/VMware-open-llama-7b-open-instruct/README.md b/spaces/WelcomeToTheClub/VMware-open-llama-7b-open-instruct/README.md deleted file mode 100644 index 9fe35420240a4bd5f0022092e84343c02c6d21ec..0000000000000000000000000000000000000000 --- 
a/spaces/WelcomeToTheClub/VMware-open-llama-7b-open-instruct/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: VMware Open Llama 7b Open Instruct -emoji: 🌍 -colorFrom: purple -colorTo: indigo -sdk: gradio -sdk_version: 3.34.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_100ep_LSJ.py b/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_100ep_LSJ.py deleted file mode 100644 index ef0b6d16d4403fb5d16a3aeb71a22621a0be5e21..0000000000000000000000000000000000000000 --- a/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_100ep_LSJ.py +++ /dev/null @@ -1,29 +0,0 @@ -from .mask_rcnn_R_50_FPN_100ep_LSJ import ( - dataloader, - lr_multiplier, - model, - optimizer, - train, -) -from detectron2.config import LazyCall as L -from detectron2.modeling.backbone import RegNet -from detectron2.modeling.backbone.regnet import SimpleStem, ResBottleneckBlock - -# Config source: -# https://github.com/facebookresearch/detectron2/blob/main/configs/COCO-InstanceSegmentation/mask_rcnn_regnetx_4gf_dds_fpn_1x.py # noqa -model.backbone.bottom_up = L(RegNet)( - stem_class=SimpleStem, - stem_width=32, - block_class=ResBottleneckBlock, - depth=23, - w_a=38.65, - w_0=96, - w_m=2.43, - group_width=40, - norm="SyncBN", - out_features=["s1", "s2", "s3", "s4"], -) -model.pixel_std = [57.375, 57.120, 58.395] - -# RegNets benefit from enabling cudnn benchmark mode -train.cudnn_benchmark = True diff --git a/spaces/abdvl/datahub_qa_bot/docs/_feature-guide-template.md b/spaces/abdvl/datahub_qa_bot/docs/_feature-guide-template.md deleted file mode 100644 index 8e4ec6f64936179d2d3a2029cdfa21191f8d653e..0000000000000000000000000000000000000000 --- a/spaces/abdvl/datahub_qa_bot/docs/_feature-guide-template.md +++ /dev/null @@ -1,83 +0,0 @@ -import FeatureAvailability from '@site/src/components/FeatureAvailability'; - -# About DataHub [Feature Name] - -<!-- All Feature Guides should begin with `About DataHub ` to improve SEO --> - -<!-- -Update feature availability; by default, feature availabilty is Self-Hosted and Managed DataHub - -Add in `saasOnly` for Managed DataHub-only features - --> - -<FeatureAvailability/> - -<!-- This section should provide a plain-language overview of feature. Consider the following: - -* What does this feature do? Why is it useful? -* What are the typical use cases? -* Who are the typical users? -* In which DataHub Version did this become available? --> - -## [Feature Name] Setup, Prerequisites, and Permissions - -<!-- This section should provide plain-language instructions on how to configure the feature: - -* What special configuration is required, if any? -* How can you confirm you configured it correctly? What is the expected behavior? -* What access levels/permissions are required within DataHub? --> - -## Using [Feature Name] - -<!-- Plain-language instructions of how to use the feature - -Provide a step-by-step guide to use feature, including relevant screenshots and/or GIFs - -* Where/how do you access it? -* What best practices exist? -* What are common code snippets? 
- --> - -## Additional Resources - -<!-- Comment out any irrelevant or empty sections --> - -### Videos - -<!-- Use the following format to embed YouTube videos: - -**Title of YouTube video in bold text** - -<p align="center"> -<iframe width="560" height="315" src="www.youtube.com/embed/VIDEO_ID" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> -</p> - ---> - -<!-- -NOTE: Find the iframe details in YouTube by going to Share > Embed - --> - -### GraphQL - -<!-- Bulleted list of relevant GraphQL docs; comment out section if none --> - -### DataHub Blog - -<!-- Bulleted list of relevant DataHub Blog posts; comment out section if none --> - -## FAQ and Troubleshooting - -<!-- Use the following format: - -**Question in bold text** - -Response in plain text - ---> - -*Need more help? Join the conversation in [Slack](http://slack.datahubproject.io)!* - -### Related Features - -<!-- Bulleted list of related features; comment out section if none --> \ No newline at end of file diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/configs/_base_/models/upernet_r50.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/configs/_base_/models/upernet_r50.py deleted file mode 100644 index 10974962fdd7136031fd06de1700f497d355ceaa..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/configs/_base_/models/upernet_r50.py +++ /dev/null @@ -1,44 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='EncoderDecoder', - pretrained='open-mmlab://resnet50_v1c', - backbone=dict( - type='ResNetV1c', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - dilations=(1, 1, 1, 1), - strides=(1, 2, 2, 2), - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - contract_dilation=True), - decode_head=dict( - type='UPerHead', - in_channels=[256, 512, 1024, 2048], - in_index=[0, 1, 2, 3], - pool_scales=(1, 2, 3, 6), - channels=512, - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - auxiliary_head=dict( - type='FCNHead', - in_channels=1024, - in_index=2, - channels=256, - num_convs=1, - concat_input=False, - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - # model training and testing settings - train_cfg=dict(), - test_cfg=dict(mode='whole')) diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/cnn/__init__.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/cnn/__init__.py deleted file mode 100644 index 7246c897430f0cc7ce12719ad8608824fc734446..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/cnn/__init__.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
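The upernet_r50 config above pairs the main UPerHead (loss_weight 1.0) with an auxiliary FCNHead whose loss is down-weighted to 0.4, a standard deep-supervision setup. A minimal sketch of how that weighted auxiliary loss combines during training; the tensors and names below are illustrative, not mmseg's actual training loop:

```python
import torch
import torch.nn.functional as F

# Illustrative logits from the decode head and the auxiliary head
# (batch 2, 19 classes, 64x64 output), plus ground-truth labels.
decode_logits = torch.randn(2, 19, 64, 64)
aux_logits = torch.randn(2, 19, 64, 64)
labels = torch.randint(0, 19, (2, 64, 64))

# Deep supervision: the auxiliary head regularizes intermediate features,
# but its loss is scaled down (0.4 here, matching the config) so the
# main head dominates the gradient signal.
loss_decode = F.cross_entropy(decode_logits, labels)  # loss_weight=1.0
loss_aux = F.cross_entropy(aux_logits, labels)        # loss_weight=0.4
total_loss = 1.0 * loss_decode + 0.4 * loss_aux
```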
-from .alexnet import AlexNet -# yapf: disable -from .bricks import (ACTIVATION_LAYERS, CONV_LAYERS, NORM_LAYERS, - PADDING_LAYERS, PLUGIN_LAYERS, UPSAMPLE_LAYERS, - ContextBlock, Conv2d, Conv3d, ConvAWS2d, ConvModule, - ConvTranspose2d, ConvTranspose3d, ConvWS2d, - DepthwiseSeparableConvModule, GeneralizedAttention, - HSigmoid, HSwish, Linear, MaxPool2d, MaxPool3d, - NonLocal1d, NonLocal2d, NonLocal3d, Scale, Swish, - build_activation_layer, build_conv_layer, - build_norm_layer, build_padding_layer, build_plugin_layer, - build_upsample_layer, conv_ws_2d, is_norm) -from .builder import MODELS, build_model_from_cfg -# yapf: enable -from .resnet import ResNet, make_res_layer -from .utils import (INITIALIZERS, Caffe2XavierInit, ConstantInit, KaimingInit, - NormalInit, PretrainedInit, TruncNormalInit, UniformInit, - XavierInit, bias_init_with_prob, caffe2_xavier_init, - constant_init, fuse_conv_bn, get_model_complexity_info, - initialize, kaiming_init, normal_init, trunc_normal_init, - uniform_init, xavier_init) -from .vgg import VGG, make_vgg_layer - -__all__ = [ - 'AlexNet', 'VGG', 'make_vgg_layer', 'ResNet', 'make_res_layer', - 'constant_init', 'xavier_init', 'normal_init', 'trunc_normal_init', - 'uniform_init', 'kaiming_init', 'caffe2_xavier_init', - 'bias_init_with_prob', 'ConvModule', 'build_activation_layer', - 'build_conv_layer', 'build_norm_layer', 'build_padding_layer', - 'build_upsample_layer', 'build_plugin_layer', 'is_norm', 'NonLocal1d', - 'NonLocal2d', 'NonLocal3d', 'ContextBlock', 'HSigmoid', 'Swish', 'HSwish', - 'GeneralizedAttention', 'ACTIVATION_LAYERS', 'CONV_LAYERS', 'NORM_LAYERS', - 'PADDING_LAYERS', 'UPSAMPLE_LAYERS', 'PLUGIN_LAYERS', 'Scale', - 'get_model_complexity_info', 'conv_ws_2d', 'ConvAWS2d', 'ConvWS2d', - 'fuse_conv_bn', 'DepthwiseSeparableConvModule', 'Linear', 'Conv2d', - 'ConvTranspose2d', 'MaxPool2d', 'ConvTranspose3d', 'MaxPool3d', 'Conv3d', - 'initialize', 'INITIALIZERS', 'ConstantInit', 'XavierInit', 'NormalInit', - 'TruncNormalInit', 'UniformInit', 'KaimingInit', 'PretrainedInit', - 'Caffe2XavierInit', 'MODELS', 'build_model_from_cfg' -] diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/ops/iou3d.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/ops/iou3d.py deleted file mode 100644 index 6fc71979190323f44c09f8b7e1761cf49cd2d76b..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/ops/iou3d.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch - -from ..utils import ext_loader - -ext_module = ext_loader.load_ext('_ext', [ - 'iou3d_boxes_iou_bev_forward', 'iou3d_nms_forward', - 'iou3d_nms_normal_forward' -]) - - -def boxes_iou_bev(boxes_a, boxes_b): - """Calculate boxes IoU in the Bird's Eye View. - - Args: - boxes_a (torch.Tensor): Input boxes a with shape (M, 5). - boxes_b (torch.Tensor): Input boxes b with shape (N, 5). - - Returns: - ans_iou (torch.Tensor): IoU result with shape (M, N). - """ - ans_iou = boxes_a.new_zeros( - torch.Size((boxes_a.shape[0], boxes_b.shape[0]))) - - ext_module.iou3d_boxes_iou_bev_forward(boxes_a.contiguous(), - boxes_b.contiguous(), ans_iou) - - return ans_iou - - -def nms_bev(boxes, scores, thresh, pre_max_size=None, post_max_size=None): - """NMS function GPU implementation (for BEV boxes). The overlap of two - boxes for IoU calculation is defined as the exact overlapping area of the - two boxes. In this function, one can also set ``pre_max_size`` and - ``post_max_size``. 
- - Args: - boxes (torch.Tensor): Input boxes with the shape of [N, 5] - ([x1, y1, x2, y2, ry]). - scores (torch.Tensor): Scores of boxes with the shape of [N]. - thresh (float): Overlap threshold of NMS. - pre_max_size (int, optional): Max size of boxes before NMS. - Default: None. - post_max_size (int, optional): Max size of boxes after NMS. - Default: None. - - Returns: - torch.Tensor: Indexes after NMS. - """ - assert boxes.size(1) == 5, 'Input boxes shape should be [N, 5]' - order = scores.sort(0, descending=True)[1] - - if pre_max_size is not None: - order = order[:pre_max_size] - boxes = boxes[order].contiguous() - - keep = torch.zeros(boxes.size(0), dtype=torch.long) - num_out = ext_module.iou3d_nms_forward(boxes, keep, thresh) - keep = order[keep[:num_out].cuda(boxes.device)].contiguous() - if post_max_size is not None: - keep = keep[:post_max_size] - return keep - - -def nms_normal_bev(boxes, scores, thresh): - """Normal NMS function GPU implementation (for BEV boxes). The overlap of - two boxes for IoU calculation is defined as the exact overlapping area of - the two boxes WITH their yaw angle set to 0. - - Args: - boxes (torch.Tensor): Input boxes with shape (N, 5). - scores (torch.Tensor): Scores of predicted boxes with shape (N). - thresh (float): Overlap threshold of NMS. - - Returns: - torch.Tensor: Remaining indices with scores in descending order. - """ - assert boxes.shape[1] == 5, 'Input boxes shape should be [N, 5]' - order = scores.sort(0, descending=True)[1] - - boxes = boxes[order].contiguous() - - keep = torch.zeros(boxes.size(0), dtype=torch.long) - num_out = ext_module.iou3d_nms_normal_forward(boxes, keep, thresh) - return order[keep[:num_out].cuda(boxes.device)].contiguous() diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/models/detectors/paa.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/models/detectors/paa.py deleted file mode 100644 index 9b4bb5e0939b824d9fef7fc3bd49a0164c29613a..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/models/detectors/paa.py +++ /dev/null @@ -1,17 +0,0 @@ -from ..builder import DETECTORS -from .single_stage import SingleStageDetector - - -@DETECTORS.register_module() -class PAA(SingleStageDetector): - """Implementation of `PAA <https://arxiv.org/pdf/2007.08103.pdf>`_.""" - - def __init__(self, - backbone, - neck, - bbox_head, - train_cfg=None, - test_cfg=None, - pretrained=None): - super(PAA, self).__init__(backbone, neck, bbox_head, train_cfg, - test_cfg, pretrained) diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/runner/fp16_utils.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/runner/fp16_utils.py deleted file mode 100644 index 1981011d6859192e3e663e29d13500d56ba47f6c..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/runner/fp16_utils.py +++ /dev/null @@ -1,410 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import functools -import warnings -from collections import abc -from inspect import getfullargspec - -import numpy as np -import torch -import torch.nn as nn - -from annotator.uniformer.mmcv.utils import TORCH_VERSION, digit_version -from .dist_utils import allreduce_grads as _allreduce_grads - -try: - # If PyTorch version >= 1.6.0, torch.cuda.amp.autocast would be imported - # and used; otherwise, auto fp16 will adopt mmcv's implementation. 
- # Note that when PyTorch >= 1.6.0, we still cast tensor types to fp16 - # manually, so the behavior may not be consistent with real amp. - from torch.cuda.amp import autocast -except ImportError: - pass - - -def cast_tensor_type(inputs, src_type, dst_type): - """Recursively convert Tensor in inputs from src_type to dst_type. - - Args: - inputs: Inputs that to be casted. - src_type (torch.dtype): Source type.. - dst_type (torch.dtype): Destination type. - - Returns: - The same type with inputs, but all contained Tensors have been cast. - """ - if isinstance(inputs, nn.Module): - return inputs - elif isinstance(inputs, torch.Tensor): - return inputs.to(dst_type) - elif isinstance(inputs, str): - return inputs - elif isinstance(inputs, np.ndarray): - return inputs - elif isinstance(inputs, abc.Mapping): - return type(inputs)({ - k: cast_tensor_type(v, src_type, dst_type) - for k, v in inputs.items() - }) - elif isinstance(inputs, abc.Iterable): - return type(inputs)( - cast_tensor_type(item, src_type, dst_type) for item in inputs) - else: - return inputs - - -def auto_fp16(apply_to=None, out_fp32=False): - """Decorator to enable fp16 training automatically. - - This decorator is useful when you write custom modules and want to support - mixed precision training. If inputs arguments are fp32 tensors, they will - be converted to fp16 automatically. Arguments other than fp32 tensors are - ignored. If you are using PyTorch >= 1.6, torch.cuda.amp is used as the - backend, otherwise, original mmcv implementation will be adopted. - - Args: - apply_to (Iterable, optional): The argument names to be converted. - `None` indicates all arguments. - out_fp32 (bool): Whether to convert the output back to fp32. - - Example: - - >>> import torch.nn as nn - >>> class MyModule1(nn.Module): - >>> - >>> # Convert x and y to fp16 - >>> @auto_fp16() - >>> def forward(self, x, y): - >>> pass - - >>> import torch.nn as nn - >>> class MyModule2(nn.Module): - >>> - >>> # convert pred to fp16 - >>> @auto_fp16(apply_to=('pred', )) - >>> def do_something(self, pred, others): - >>> pass - """ - - def auto_fp16_wrapper(old_func): - - @functools.wraps(old_func) - def new_func(*args, **kwargs): - # check if the module has set the attribute `fp16_enabled`, if not, - # just fallback to the original method. 
- if not isinstance(args[0], torch.nn.Module): - raise TypeError('@auto_fp16 can only be used to decorate the ' - 'method of nn.Module') - if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled): - return old_func(*args, **kwargs) - - # get the arg spec of the decorated method - args_info = getfullargspec(old_func) - # get the argument names to be casted - args_to_cast = args_info.args if apply_to is None else apply_to - # convert the args that need to be processed - new_args = [] - # NOTE: default args are not taken into consideration - if args: - arg_names = args_info.args[:len(args)] - for i, arg_name in enumerate(arg_names): - if arg_name in args_to_cast: - new_args.append( - cast_tensor_type(args[i], torch.float, torch.half)) - else: - new_args.append(args[i]) - # convert the kwargs that need to be processed - new_kwargs = {} - if kwargs: - for arg_name, arg_value in kwargs.items(): - if arg_name in args_to_cast: - new_kwargs[arg_name] = cast_tensor_type( - arg_value, torch.float, torch.half) - else: - new_kwargs[arg_name] = arg_value - # apply converted arguments to the decorated method - if (TORCH_VERSION != 'parrots' and - digit_version(TORCH_VERSION) >= digit_version('1.6.0')): - with autocast(enabled=True): - output = old_func(*new_args, **new_kwargs) - else: - output = old_func(*new_args, **new_kwargs) - # cast the results back to fp32 if necessary - if out_fp32: - output = cast_tensor_type(output, torch.half, torch.float) - return output - - return new_func - - return auto_fp16_wrapper - - -def force_fp32(apply_to=None, out_fp16=False): - """Decorator to convert input arguments to fp32 in force. - - This decorator is useful when you write custom modules and want to support - mixed precision training. If there are some inputs that must be processed - in fp32 mode, then this decorator can handle it. If inputs arguments are - fp16 tensors, they will be converted to fp32 automatically. Arguments other - than fp16 tensors are ignored. If you are using PyTorch >= 1.6, - torch.cuda.amp is used as the backend, otherwise, original mmcv - implementation will be adopted. - - Args: - apply_to (Iterable, optional): The argument names to be converted. - `None` indicates all arguments. - out_fp16 (bool): Whether to convert the output back to fp16. - - Example: - - >>> import torch.nn as nn - >>> class MyModule1(nn.Module): - >>> - >>> # Convert x and y to fp32 - >>> @force_fp32() - >>> def loss(self, x, y): - >>> pass - - >>> import torch.nn as nn - >>> class MyModule2(nn.Module): - >>> - >>> # convert pred to fp32 - >>> @force_fp32(apply_to=('pred', )) - >>> def post_process(self, pred, others): - >>> pass - """ - - def force_fp32_wrapper(old_func): - - @functools.wraps(old_func) - def new_func(*args, **kwargs): - # check if the module has set the attribute `fp16_enabled`, if not, - # just fallback to the original method. 
- if not isinstance(args[0], torch.nn.Module): - raise TypeError('@force_fp32 can only be used to decorate the ' - 'method of nn.Module') - if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled): - return old_func(*args, **kwargs) - # get the arg spec of the decorated method - args_info = getfullargspec(old_func) - # get the argument names to be casted - args_to_cast = args_info.args if apply_to is None else apply_to - # convert the args that need to be processed - new_args = [] - if args: - arg_names = args_info.args[:len(args)] - for i, arg_name in enumerate(arg_names): - if arg_name in args_to_cast: - new_args.append( - cast_tensor_type(args[i], torch.half, torch.float)) - else: - new_args.append(args[i]) - # convert the kwargs that need to be processed - new_kwargs = dict() - if kwargs: - for arg_name, arg_value in kwargs.items(): - if arg_name in args_to_cast: - new_kwargs[arg_name] = cast_tensor_type( - arg_value, torch.half, torch.float) - else: - new_kwargs[arg_name] = arg_value - # apply converted arguments to the decorated method - if (TORCH_VERSION != 'parrots' and - digit_version(TORCH_VERSION) >= digit_version('1.6.0')): - with autocast(enabled=False): - output = old_func(*new_args, **new_kwargs) - else: - output = old_func(*new_args, **new_kwargs) - # cast the results back to fp32 if necessary - if out_fp16: - output = cast_tensor_type(output, torch.float, torch.half) - return output - - return new_func - - return force_fp32_wrapper - - -def allreduce_grads(params, coalesce=True, bucket_size_mb=-1): - warnings.warning( - '"mmcv.runner.fp16_utils.allreduce_grads" is deprecated, and will be ' - 'removed in v2.8. Please switch to "mmcv.runner.allreduce_grads') - _allreduce_grads(params, coalesce=coalesce, bucket_size_mb=bucket_size_mb) - - -def wrap_fp16_model(model): - """Wrap the FP32 model to FP16. - - If you are using PyTorch >= 1.6, torch.cuda.amp is used as the - backend, otherwise, original mmcv implementation will be adopted. - - For PyTorch >= 1.6, this function will - 1. Set fp16 flag inside the model to True. - - Otherwise: - 1. Convert FP32 model to FP16. - 2. Remain some necessary layers to be FP32, e.g., normalization layers. - 3. Set `fp16_enabled` flag inside the model to True. - - Args: - model (nn.Module): Model in FP32. - """ - if (TORCH_VERSION == 'parrots' - or digit_version(TORCH_VERSION) < digit_version('1.6.0')): - # convert model to fp16 - model.half() - # patch the normalization layers to make it work in fp32 mode - patch_norm_fp32(model) - # set `fp16_enabled` flag - for m in model.modules(): - if hasattr(m, 'fp16_enabled'): - m.fp16_enabled = True - - -def patch_norm_fp32(module): - """Recursively convert normalization layers from FP16 to FP32. - - Args: - module (nn.Module): The modules to be converted in FP16. - - Returns: - nn.Module: The converted module, the normalization layers have been - converted to FP32. - """ - if isinstance(module, (nn.modules.batchnorm._BatchNorm, nn.GroupNorm)): - module.float() - if isinstance(module, nn.GroupNorm) or torch.__version__ < '1.3': - module.forward = patch_forward_method(module.forward, torch.half, - torch.float) - for child in module.children(): - patch_norm_fp32(child) - return module - - -def patch_forward_method(func, src_type, dst_type, convert_output=True): - """Patch the forward method of a module. - - Args: - func (callable): The original forward method. - src_type (torch.dtype): Type of input arguments to be converted from. 
- dst_type (torch.dtype): Type of input arguments to be converted to. - convert_output (bool): Whether to convert the output back to src_type. - - Returns: - callable: The patched forward method. - """ - - def new_forward(*args, **kwargs): - output = func(*cast_tensor_type(args, src_type, dst_type), - **cast_tensor_type(kwargs, src_type, dst_type)) - if convert_output: - output = cast_tensor_type(output, dst_type, src_type) - return output - - return new_forward - - -class LossScaler: - """Class that manages loss scaling in mixed precision training which - supports both dynamic or static mode. - - The implementation refers to - https://github.com/NVIDIA/apex/blob/master/apex/fp16_utils/loss_scaler.py. - Indirectly, by supplying ``mode='dynamic'`` for dynamic loss scaling. - It's important to understand how :class:`LossScaler` operates. - Loss scaling is designed to combat the problem of underflowing - gradients encountered at long times when training fp16 networks. - Dynamic loss scaling begins by attempting a very high loss - scale. Ironically, this may result in OVERflowing gradients. - If overflowing gradients are encountered, :class:`FP16_Optimizer` then - skips the update step for this particular iteration/minibatch, - and :class:`LossScaler` adjusts the loss scale to a lower value. - If a certain number of iterations occur without overflowing gradients - detected,:class:`LossScaler` increases the loss scale once more. - In this way :class:`LossScaler` attempts to "ride the edge" of always - using the highest loss scale possible without incurring overflow. - - Args: - init_scale (float): Initial loss scale value, default: 2**32. - scale_factor (float): Factor used when adjusting the loss scale. - Default: 2. - mode (str): Loss scaling mode. 'dynamic' or 'static' - scale_window (int): Number of consecutive iterations without an - overflow to wait before increasing the loss scale. Default: 1000. 
- """ - - def __init__(self, - init_scale=2**32, - mode='dynamic', - scale_factor=2., - scale_window=1000): - self.cur_scale = init_scale - self.cur_iter = 0 - assert mode in ('dynamic', - 'static'), 'mode can only be dynamic or static' - self.mode = mode - self.last_overflow_iter = -1 - self.scale_factor = scale_factor - self.scale_window = scale_window - - def has_overflow(self, params): - """Check if params contain overflow.""" - if self.mode != 'dynamic': - return False - for p in params: - if p.grad is not None and LossScaler._has_inf_or_nan(p.grad.data): - return True - return False - - def _has_inf_or_nan(x): - """Check if params contain NaN.""" - try: - cpu_sum = float(x.float().sum()) - except RuntimeError as instance: - if 'value cannot be converted' not in instance.args[0]: - raise - return True - else: - if cpu_sum == float('inf') or cpu_sum == -float('inf') \ - or cpu_sum != cpu_sum: - return True - return False - - def update_scale(self, overflow): - """update the current loss scale value when overflow happens.""" - if self.mode != 'dynamic': - return - if overflow: - self.cur_scale = max(self.cur_scale / self.scale_factor, 1) - self.last_overflow_iter = self.cur_iter - else: - if (self.cur_iter - self.last_overflow_iter) % \ - self.scale_window == 0: - self.cur_scale *= self.scale_factor - self.cur_iter += 1 - - def state_dict(self): - """Returns the state of the scaler as a :class:`dict`.""" - return dict( - cur_scale=self.cur_scale, - cur_iter=self.cur_iter, - mode=self.mode, - last_overflow_iter=self.last_overflow_iter, - scale_factor=self.scale_factor, - scale_window=self.scale_window) - - def load_state_dict(self, state_dict): - """Loads the loss_scaler state dict. - - Args: - state_dict (dict): scaler state. - """ - self.cur_scale = state_dict['cur_scale'] - self.cur_iter = state_dict['cur_iter'] - self.mode = state_dict['mode'] - self.last_overflow_iter = state_dict['last_overflow_iter'] - self.scale_factor = state_dict['scale_factor'] - self.scale_window = state_dict['scale_window'] - - @property - def loss_scale(self): - return self.cur_scale diff --git a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/media/codecs/ffmpeg_lib/libswscale.py b/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/media/codecs/ffmpeg_lib/libswscale.py deleted file mode 100644 index 8c135870d776e6b7d7129edaee81488d2019a9b4..0000000000000000000000000000000000000000 --- a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/media/codecs/ffmpeg_lib/libswscale.py +++ /dev/null @@ -1,56 +0,0 @@ -"""Wrapper for include/libswscale/swscale.h -""" -from ctypes import POINTER, Structure -from ctypes import c_int -from ctypes import c_uint8, c_double - -import pyglet.lib -from pyglet.util import debug_print -from . 
import compat - -_debug = debug_print('debug_media') - - -swscale = pyglet.lib.load_library( - 'swscale', - win32=('swscale-6', 'swscale-5'), - darwin=('swscale.6', 'swscale.5') -) - -swscale.swscale_version.restype = c_int - -compat.set_version('swscale', swscale.swscale_version() >> 16) - - -SWS_FAST_BILINEAR = 1 - - -class SwsContext(Structure): - pass - - -class SwsFilter(Structure): - pass - - -swscale.sws_getCachedContext.restype = POINTER(SwsContext) -swscale.sws_getCachedContext.argtypes = [POINTER(SwsContext), - c_int, c_int, c_int, c_int, - c_int, c_int, c_int, - POINTER(SwsFilter), POINTER(SwsFilter), - POINTER(c_double)] -swscale.sws_freeContext.argtypes = [POINTER(SwsContext)] -swscale.sws_scale.restype = c_int -swscale.sws_scale.argtypes = [POINTER(SwsContext), - POINTER(POINTER(c_uint8)), - POINTER(c_int), - c_int, c_int, - POINTER(POINTER(c_uint8)), - POINTER(c_int)] - -__all__ = [ - 'swscale', - 'SWS_FAST_BILINEAR', - 'SwsContext', - 'SwsFilter' -] diff --git a/spaces/achajon/prompthero-openjourney-v2/README.md b/spaces/achajon/prompthero-openjourney-v2/README.md deleted file mode 100644 index 73596e71f12d397f0a5135b19b7441141fd92ad6..0000000000000000000000000000000000000000 --- a/spaces/achajon/prompthero-openjourney-v2/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Prompthero Openjourney V2 -emoji: 🌍 -colorFrom: blue -colorTo: purple -sdk: gradio -sdk_version: 3.16.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/adirik/stylemc-demo/encoder4editing/training/ranger.py b/spaces/adirik/stylemc-demo/encoder4editing/training/ranger.py deleted file mode 100644 index 3d63264dda6df0ee40cac143440f0b5f8977a9ad..0000000000000000000000000000000000000000 --- a/spaces/adirik/stylemc-demo/encoder4editing/training/ranger.py +++ /dev/null @@ -1,164 +0,0 @@ -# Ranger deep learning optimizer - RAdam + Lookahead + Gradient Centralization, combined into one optimizer. - -# https://github.com/lessw2020/Ranger-Deep-Learning-Optimizer -# and/or -# https://github.com/lessw2020/Best-Deep-Learning-Optimizers - -# Ranger has now been used to capture 12 records on the FastAI leaderboard. - -# This version = 20.4.11 - -# Credits: -# Gradient Centralization --> https://arxiv.org/abs/2004.01461v2 (a new optimization technique for DNNs), github: https://github.com/Yonghongwei/Gradient-Centralization -# RAdam --> https://github.com/LiyuanLucasLiu/RAdam -# Lookahead --> rewritten by lessw2020, but big thanks to Github @LonePatient and @RWightman for ideas from their code. -# Lookahead paper --> MZhang,G Hinton https://arxiv.org/abs/1907.08610 - -# summary of changes: -# 4/11/20 - add gradient centralization option. Set new testing benchmark for accuracy with it, toggle with use_gc flag at init. -# full code integration with all updates at param level instead of group, moves slow weights into state dict (from generic weights), -# supports group learning rates (thanks @SHolderbach), fixes sporadic load from saved model issues. -# changes 8/31/19 - fix references to *self*.N_sma_threshold; -# changed eps to 1e-5 as better default than 1e-8. 
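The Ranger header above credits two add-ons to RAdam: gradient centralization and Lookahead. A compact sketch of just those two mechanics, separate from the full optimizer below; the function and variable names are invented for this illustration:

```python
import torch

def centralize_gradient(grad: torch.Tensor) -> torch.Tensor:
    # Gradient centralization: for conv/fc weights (dim > 1), subtract the
    # mean taken over all dimensions except the output-channel dimension.
    if grad.dim() > 1:
        return grad - grad.mean(dim=tuple(range(1, grad.dim())), keepdim=True)
    return grad

def lookahead_update(slow: torch.Tensor, fast: torch.Tensor, alpha: float = 0.5) -> torch.Tensor:
    # Lookahead: every k steps, move the slow weights a fraction alpha toward
    # the fast weights; the fast weights are then reset to this point.
    return slow + alpha * (fast - slow)

# Tiny demo with a fake conv-weight gradient and a pair of weight copies.
g = torch.randn(8, 3, 3, 3)
g_centralized = centralize_gradient(g)
print(g_centralized.mean(dim=(1, 2, 3)))  # per-filter means are ~0 after centralization

slow_w = torch.zeros(10)
fast_w = torch.ones(10)
print(lookahead_update(slow_w, fast_w, alpha=0.5))  # 0.5 everywhere
```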
- -import math -import torch -from torch.optim.optimizer import Optimizer - - -class Ranger(Optimizer): - - def __init__(self, params, lr=1e-3, # lr - alpha=0.5, k=6, N_sma_threshhold=5, # Ranger options - betas=(.95, 0.999), eps=1e-5, weight_decay=0, # Adam options - use_gc=True, gc_conv_only=False - # Gradient centralization on or off, applied to conv layers only or conv + fc layers - ): - - # parameter checks - if not 0.0 <= alpha <= 1.0: - raise ValueError(f'Invalid slow update rate: {alpha}') - if not 1 <= k: - raise ValueError(f'Invalid lookahead steps: {k}') - if not lr > 0: - raise ValueError(f'Invalid Learning Rate: {lr}') - if not eps > 0: - raise ValueError(f'Invalid eps: {eps}') - - # parameter comments: - # beta1 (momentum) of .95 seems to work better than .90... - # N_sma_threshold of 5 seems better in testing than 4. - # In both cases, worth testing on your dataset (.90 vs .95, 4 vs 5) to make sure which works best for you. - - # prep defaults and init torch.optim base - defaults = dict(lr=lr, alpha=alpha, k=k, step_counter=0, betas=betas, N_sma_threshhold=N_sma_threshhold, - eps=eps, weight_decay=weight_decay) - super().__init__(params, defaults) - - # adjustable threshold - self.N_sma_threshhold = N_sma_threshhold - - # look ahead params - - self.alpha = alpha - self.k = k - - # radam buffer for state - self.radam_buffer = [[None, None, None] for ind in range(10)] - - # gc on or off - self.use_gc = use_gc - - # level of gradient centralization - self.gc_gradient_threshold = 3 if gc_conv_only else 1 - - def __setstate__(self, state): - super(Ranger, self).__setstate__(state) - - def step(self, closure=None): - loss = None - - # Evaluate averages and grad, update param tensors - for group in self.param_groups: - - for p in group['params']: - if p.grad is None: - continue - grad = p.grad.data.float() - - if grad.is_sparse: - raise RuntimeError('Ranger optimizer does not support sparse gradients') - - p_data_fp32 = p.data.float() - - state = self.state[p] # get state dict for this param - - if len(state) == 0: # if first time to run...init dictionary with our desired entries - # if self.first_run_check==0: - # self.first_run_check=1 - # print("Initializing slow buffer...should not see this at load from saved model!") - state['step'] = 0 - state['exp_avg'] = torch.zeros_like(p_data_fp32) - state['exp_avg_sq'] = torch.zeros_like(p_data_fp32) - - # look ahead weight storage now in state dict - state['slow_buffer'] = torch.empty_like(p.data) - state['slow_buffer'].copy_(p.data) - - else: - state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32) - state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32) - - # begin computations - exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] - beta1, beta2 = group['betas'] - - # GC operation for Conv layers and FC layers - if grad.dim() > self.gc_gradient_threshold: - grad.add_(-grad.mean(dim=tuple(range(1, grad.dim())), keepdim=True)) - - state['step'] += 1 - - # compute variance mov avg - exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) - # compute mean moving avg - exp_avg.mul_(beta1).add_(1 - beta1, grad) - - buffered = self.radam_buffer[int(state['step'] % 10)] - - if state['step'] == buffered[0]: - N_sma, step_size = buffered[1], buffered[2] - else: - buffered[0] = state['step'] - beta2_t = beta2 ** state['step'] - N_sma_max = 2 / (1 - beta2) - 1 - N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t) - buffered[1] = N_sma - if N_sma > self.N_sma_threshhold: - step_size = math.sqrt( - (1 - beta2_t) * 
(N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / ( - N_sma_max - 2)) / (1 - beta1 ** state['step']) - else: - step_size = 1.0 / (1 - beta1 ** state['step']) - buffered[2] = step_size - - if group['weight_decay'] != 0: - p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32) - - # apply lr - if N_sma > self.N_sma_threshhold: - denom = exp_avg_sq.sqrt().add_(group['eps']) - p_data_fp32.addcdiv_(-step_size * group['lr'], exp_avg, denom) - else: - p_data_fp32.add_(-step_size * group['lr'], exp_avg) - - p.data.copy_(p_data_fp32) - - # integrated look ahead... - # we do it at the param level instead of group level - if state['step'] % group['k'] == 0: - slow_p = state['slow_buffer'] # get access to slow param tensor - slow_p.add_(self.alpha, p.data - slow_p) # (fast weights - slow weights) * alpha - p.data.copy_(slow_p) # copy interpolated weights to RAdam param tensor - - return loss \ No newline at end of file diff --git a/spaces/aheskandani/FilesTools/app.py b/spaces/aheskandani/FilesTools/app.py deleted file mode 100644 index 374c0a0a2795353c629b2d2a239d4bf5ae09730a..0000000000000000000000000000000000000000 --- a/spaces/aheskandani/FilesTools/app.py +++ /dev/null @@ -1,273 +0,0 @@ -from telegram.ext.filters import filters -from telegram.ext.messagehandler import MessageHandler -from telegram import ParseMode -from telegram.ext import Updater, CommandHandler, ConversationHandler, CallbackQueryHandler -from telegram import InlineKeyboardButton, InlineKeyboardMarkup, ForceReply, ReplyKeyboardMarkup -import urllib.request -from threading import Thread -from random import randint -import os - - -def start(update, context): - def thread_function(update, context): - chat_id = update.effective_chat.id - text = ('Hello dear user!') - context.bot.send_message(chat_id, text, reply_markup=main_keyboard) - thread = Thread(target=thread_function, args=(update,context)) - thread.start() - -def help(update, context): - def thread_function(update, context): - chat_id = update.effective_chat.id - text = ('Hello dear user!') - context.bot.send_message(chat_id, text, reply_markup=main_keyboard) - thread = Thread(target=thread_function, args=(update,context)) - thread.start() -# Download URL - -def download_url(update, context): - def thread_function(update, context): - chat_id = update.effective_chat.id - text = ('📝 <b>Send your file url</b>\n\n' - '⚠️ If you dont send your url before 60s the operation will cancel!') - context.bot.send_message(chat_id, text, parse_mode=ParseMode.HTML) - return 'GET_FILE_NAME' - thread = Thread(target=thread_function, args=(update,context)) - thread.start() - -def get_file_name(update, context): - def thread_function(update, context): - chat_id = update.effective_chat.id - context.user_data['url'] = update.message.text - try: - response = urllib.request.urlopen(urllib.request.Request(context.user_data['url'], method='HEAD')) - except: - text = ('⁉️ Sorry! Invalid url!') - context.bot.send_message(chat_id, text, reply_markup=main_keyboard) - return ConversationHandler.END - if response.status != 200: - text = ('⁉️ Sorry! Unable to download the url!') - context.bot.send_message(chat_id, text, reply_markup=main_keyboard) - return ConversationHandler.END - context.user_data['file_size'] = float(response.headers["Content-Length"]) / 2**20 - if context.user_data['file_size'] > 50: - text = ('🚫 Sorry! 
File size is larger than 50MB!') - context.bot.send_message(chat_id, text, reply_markup=main_keyboard) - return ConversationHandler.END - text = ('✅ <b>All right! Send your new file name</b>\n' - '💢 If you want to set file name automatically send /None command!\n\n' - '⚠️ If you dont send your url before 60s the operation will cancel!') - context.bot.send_message(chat_id, text, parse_mode=ParseMode.HTML) - return 'UPLOAD_FILE' - thread = Thread(target=thread_function, args=(update,context)) - thread.start() - -def upload_file(update, context): - def thread_function(update, context): - chat_id = update.effective_chat.id - context.user_data['file_name'] = update.message.text - if update.message.text == '/None': - context.user_data['file_name'] = context.user_data['url'].split('/')[-1] - else: - context.user_data['file_name'] = update.message.text - download_dir = path + 'temp/' + context.user_data['file_name'] - try: - urllib.request.urlretrieve(context.user_data['url'], download_dir) - context.bot.send_document(chat_id, - document=open(download_dir, 'rb'), - reply_markup=main_keyboard) - os.remove(download_dir) - except: - text = ('🚫 Sorry! Try again later!') - context.bot.send_message(chat_id, text, reply_markup=main_keyboard) - return ConversationHandler.END - thread = Thread(target=thread_function, args=(update,context)) - thread.start() - -def timeout_operation(update, context): - text = 'Timout ... 😴' - update.message.reply_text(text, reply_markup=main_keyboard) - return ConversationHandler.END - -def cancel_operation(update, context): - text = '⁉️ Operation Canceled!' - update.message.reply_text(text, reply_markup=main_keyboard) - return ConversationHandler.END - -# Rename Files - -def start_rename_files(update, context): - def thread_function(update, context): - chat_id = update.effective_chat.id - text = ('📝 <b>Send your file with size less than 50MB</b>\n\n' - '⚠️ If you dont send your file before 60s the operation will /cancel!') - context.bot.send_message(chat_id, text, parse_mode=ParseMode.HTML) - return 'GET_FILE' - thread = Thread(target=thread_function, args=(update,context)) - thread.start() - -def get_file(update, context): - def thread_function(update, context): - chat_id = update.effective_chat.id - context.user_data['file'] = update.message.document - file_name = update.message.document.file_name - if update.message.document.file_size > 20 * 2**20: - update.message.reply_text('🚫 Sorry! File size larger than 20 MB!') - return ConversationHandler.END - file_extension = file_name.split('.')[-1] - if len(file_extension) > 5: - context.user_data['ext'] = 'None' - else: - context.user_data['ext'] = file_name.split('.')[-1] - text = ('📝 <b>Send your new file name</b>\n' - f'💢 If your name has not file extension we will add <b>.{context.user_data["ext"]}</b> to your file name\n\n' - '⚠️ If you dont send your file before 60s the operation will /cancel!') - context.bot.send_message(chat_id, text, reply_markup=ForceReply(), parse_mode=ParseMode.HTML) - return 'GO_TO_RENAME' - thread = Thread(target=thread_function, args=(update,context)) - thread.start() - -def rename_file_and_upload(update, context): - def thread_function(update, context): - chat_id = update.effective_chat.id - file_name = update.message.text - if file_name.find('.') < 0 and context.user_data['ext']: - file_name += ('.' 
+ context.user_data['ext']) - with open(f'{path}/temp/{file_name}', 'wb') as f: - context.bot.get_file(context.user_data['file']).download(out = f) - context.bot.send_document(chat_id, document=open(f'{path}/temp/{file_name}', 'rb'), reply_markup=main_keyboard) - os.remove(f'{path}/temp/{file_name}') - return ConversationHandler.END - thread = Thread(target=thread_function, args=(update,context)) - thread.start() - -# Sync Excel Files - -def start_excel_sync(update, context): - def thread_function(update, context): - chat_id = update.effective_chat.id - text = ('📝 <b>Send the firts excel file</b>\n\n' - '⚠️ If you dont send your file before 60s the operation will /cancel!') - context.bot.send_message(chat_id, text, parse_mode=ParseMode.HTML) - return 'GET_FIRST_EXCEL' - thread = Thread(target=thread_function, args=(update,context)) - thread.start() - -def get_first_excel(update, context): - def thread_function(update, context): - chat_id = update.effective_chat.id - context.user_data['first_excel'] = update.message.document - if update.message.document.file_name.split('.')[-1] not in ['xlsx', 'xls']: - text = ('🚫 Sorry! You can just send excel files!') - context.bot.send_message(chat_id, text, reply_markup=main_keyboard) - return ConversationHandler.END - text = ('📝 <b>Send the second excel file</b>\n\n' - '⚠️ If you dont send your file before 60s the operation will /cancel!') - context.bot.send_message(chat_id, text, parse_mode=ParseMode.HTML) - return 'GET_SECOND_EXCEL' - thread = Thread(target=thread_function, args=(update,context)) - thread.start() - -def get_second_excel(update, context): - def thread_function(update, context): - chat_id = update.effective_chat.id - context.user_data['second_excel'] = update.message.document - if update.message.document.file_name.split('.')[-1] not in ['xlsx', 'xls']: - text = ('🚫 Sorry! You can just send excel files!') - context.bot.send_message(chat_id, text, reply_markup=main_keyboard) - return ConversationHandler.END - text = ('📝 <b>Send the column name that you want merge on it</b>\n\n' - '⚠️ If you dont send your name before 60s the operation will /cancel!') - context.bot.send_message(chat_id, text, parse_mode=ParseMode.HTML) - return 'UPLOAD_FILE' - thread = Thread(target=thread_function, args=(update,context)) - thread.start() - -def merge_and_upload(update, context): - def thread_function(update, context): - id = randint(1000000000, 9999999999) - chat_id = update.effective_chat.id - with open(f'{path}/temp/{id}_a.xlsx', 'wb') as f: - context.bot.get_file(context.user_data['first_excel']).download(out=f) - with open(f'{path}/temp/{id}_b.xlsx', 'wb') as f: - context.bot.get_file(context.user_data['second_excel']).download(out=f) - if merge_two_excel(id, update.message.document): - context.bot.send_document(chat_id, - document=open(f'{path}/temp/s_{id}.xlsx', 'rb'), - reply_markup=main_keyboard) - else: - os.remove(f'{path}/temp/{id}_a.xlsx') - os.remove(f'{path}/temp/{id}_b.xlsx') - text = '⁉️ <b>ّSorry! 
Operation has been failed</b>\n\n' - context.bot.send_message(chat_id, text, reply_markup=main_keyboard, parse_mode=ParseMode.HTML) - return ConversationHandler.END - thread = Thread(target=thread_function, args=(update,context)) - thread.start() - -def merge_two_excel(id, column_tag): - try: - import pandas as pd - data_1 = pd.read_excel(f'{path}/temp/{id}_a.xlsx') - data_2 = pd.read_excel(f'{path}/temp/{id}_b.xlsx') - data = data_1.merge(data_2, on=column_tag, how='outer', suffixes=('_x', '_y')) - data = (data.rename(columns = lambda x: x.replace('_x', '')).fillna(data.filter(regex='_y$') - .rename(columns = lambda x: x.replace('_y', ''))).filter(regex=r'.*(?<!_y)$')) - data.to_excel(f'{path}/temp/s_{id}.xlsx', index=False) - os.remove(f'{path}/temp/{id}_a.xlsx') - os.remove(f'{path}/temp/{id}_b.xlsx') - return True - except: - return False - -# Main function -if __name__ == '__main__': - # Bot Configs - TOKEN = '5931628423:AAEztLAlYWOs-RwpN6Bb0D0Xkqt2JvSNFQY' - admin_id = 37087739 - updater = Updater(token=TOKEN) - path = '/'.join(__file__.split('/')[:-1]) + '/' - # Keyboards - buttons = [['Download Url', 'Rename File'], ['Sync Excel']] - main_keyboard = ReplyKeyboardMarkup(buttons, one_time_keyboard=True, resize_keyboard=True) - # Command Handlers - updater.dispatcher.add_handler(CommandHandler('start', start, filters.chat_type.private)) - # Conversation Handlers - updater.dispatcher.add_handler(ConversationHandler( - entry_points=[MessageHandler(filters.text('Download Url') , download_url)], - states={ - 'GET_FILE_NAME': [MessageHandler(filters.text, get_file_name)], - 'UPLOAD_FILE': [MessageHandler(filters.text, upload_file)], - ConversationHandler.TIMEOUT: [MessageHandler(filters.all, timeout_operation)] - }, - fallbacks=[CommandHandler('cancel', cancel_operation)], - conversation_timeout=30, - )) - updater.dispatcher.add_handler(ConversationHandler( - entry_points=[MessageHandler(filters.text('Rename File') , start_rename_files)], - states={ - 'GET_FILE': [MessageHandler(filters.document, get_file)], - 'GO_TO_RENAME': [MessageHandler(filters.text, rename_file_and_upload)], - ConversationHandler.TIMEOUT: [MessageHandler(filters.all, timeout_operation)] - }, - fallbacks=[CommandHandler('cancel', cancel_operation)], - conversation_timeout=30, - )) - updater.dispatcher.add_handler(ConversationHandler( - entry_points=[MessageHandler(filters.text('Sync Excel'), start_excel_sync)], - states={ - 'GET_FIRST_EXCEL': [MessageHandler(filters.document, get_first_excel)], - 'GET_SECOND_EXCEL': [MessageHandler(filters.document, get_second_excel)], - 'UPLOAD_FILE': [MessageHandler(filters.text, merge_and_upload)], - ConversationHandler.TIMEOUT: [MessageHandler(filters.all, timeout_operation)] - }, - fallbacks=[CommandHandler('cancel', cancel_operation)], - conversation_timeout=120, - )) - # Message Handlers - updater.dispatcher.add_handler(MessageHandler(filters.text('Help') & filters.chat_type.private, help)) - #updater.dispatcher.add_handler(MessageHandler(filters.text & filters.chat_type.private, other)) - # Start Bot - updater.start_polling() - print('Bot is started ...') - updater.idle() \ No newline at end of file diff --git a/spaces/akhaliq/arcanegannewtheme/app.py b/spaces/akhaliq/arcanegannewtheme/app.py deleted file mode 100644 index cc427f8a0a4350c565cda705218f9f583528affd..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/arcanegannewtheme/app.py +++ /dev/null @@ -1,6 +0,0 @@ -import os -os.system("pip install gradio==2.9b11") -import gradio as gr - 
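merge_two_excel above performs an outer merge on the user-chosen column and then coalesces the suffixed duplicate columns with a rename/fillna/filter chain. A simpler sketch of the same coalescing idea on a toy frame; the column names here are made up for the demo and are not part of the bot's code:

```python
import pandas as pd

# Two tables sharing an "id" key but with partially overlapping columns.
a = pd.DataFrame({"id": [1, 2, 3], "name": ["a", "b", None]})
b = pd.DataFrame({"id": [2, 3, 4], "name": [None, "c", "d"], "score": [10, 20, 30]})

# Outer merge keeps every id; shared non-key columns get _x/_y suffixes.
merged = a.merge(b, on="id", how="outer", suffixes=("_x", "_y"))

# Coalesce: prefer the left value, fall back to the right, then drop the _y copy.
merged["name"] = merged["name_x"].fillna(merged["name_y"])
merged = merged.drop(columns=["name_x", "name_y"])
print(merged)
```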
-gr.Interface.load("spaces/akhaliq/ArcaneGAN").launch() - diff --git a/spaces/akhaliq/deeplab2/model/layers/axial_layers.py b/spaces/akhaliq/deeplab2/model/layers/axial_layers.py deleted file mode 100644 index 48e2f8651c1f3ea1b8eeafc987ffbf6bae753161..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/deeplab2/model/layers/axial_layers.py +++ /dev/null @@ -1,523 +0,0 @@ -# coding=utf-8 -# Copyright 2021 The Deeplab2 Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Implements Axial-Attention layers proposed in Axial-DeepLab. - -Axial-Attention factorizes 2D self-attention into two 1D self-attentions, so -that it can be applied on large inputs. Axial-Attention is typically used to -replace 3x3 convolutions in a bottleneck residual block. - -[1] Axial-Deeplab: Stand-Alone Axial-Attention for Panoptic Segmentation, - ECCV 2020 Spotlight. - Huiyu Wang, Yukun Zhu, Bradley Green, Hartwig Adam, Alan Yuille, - Liang-Chieh Chen. -""" - -import numpy as np -import tensorflow as tf - -from deeplab2.model import utils -from deeplab2.model.layers import activations -from deeplab2.model.layers import positional_encodings - - -class AxialAttention(tf.keras.layers.Layer): - """An axial-attention layer.""" - - def __init__(self, - query_shape=129, - memory_flange=32, - total_key_depth=512, - total_value_depth=1024, - num_heads=8, - name='axial_attention', - use_query_rpe_similarity=True, - use_key_rpe_similarity=True, - use_content_similarity=True, - retrieve_value_rpe=True, - retrieve_value_content=True, - initialization_std_for_query_key_rpe=1.0, - initialization_std_for_value_rpe=1.0, - self_attention_activation='softmax', - bn_layer=tf.keras.layers.BatchNormalization, - conv_kernel_weight_decay=0.0): - """Initializes an axial-attention layer. - - This function is designed to support both global and local axial-attention - in a unified way. If query_shape is larger than the length of input, a - global attention is applied. If query_shape is smaller than the length of - input, a local attention is applied. In this case, the input is divided into - blocks of length query_shape, padded by memory_flange on both sides. Then, - local attention is applied within each query block. The choice of - query_shape does not affect the output value but affects computation - efficiency and memory usage. In general, use global attention (large - query_shape) if possible. Local axial-attention has not been supported yet. - - Args: - query_shape: An integer, the block size for local axial attention. - Defaults to 129 since 129 is usually the largest feature map where we do - global attention (1025 with stride 8, or 2049 with stride 16). - memory_flange: An integer, the memory flange padded to each query block in - local attention. It has no effect in global attention. Defaults to 32, - which is equivalent to a span of 65 in Aixal-DeepLab paper -- A pixel - can see 32 pixels on its left and 32 pixels on its right. 
- total_key_depth: An integer, the total depth of keys, which is also the - depth of queries and the depth of key (query) positional encodings. - total_value_depth: An integer, the total depth of the values, which is - also the depth of value positional encodings. - num_heads: An integer, the number of heads in multi-head attention. - name: A string, the name of this axial attention layer. - use_query_rpe_similarity: A boolean, whether to use the attention - similarity between the queries and the relative positional encodings. - use_key_rpe_similarity: A boolean, whether to use the attention similarity - between the keys and the relative positional encodings. - use_content_similarity: A boolean, whether to use the content similarity - between the queries and the keys. - retrieve_value_rpe: A boolean, whether to retrieve the relative positional - encodings of the values. - retrieve_value_content: A boolean, whether to retrieve the content of the - values. - initialization_std_for_query_key_rpe: A float, the initialization std for - the relative positional encodings of the queries and keys. - initialization_std_for_value_rpe: A float, the initialization std for the - relative positional encodings of the values. - self_attention_activation: A string, type of activation function for - self-attention. Support 'sigmoid' and 'softmax'. - bn_layer: A tf.keras.layers.Layer that computes the normalization - (default: tf.keras.layers.BatchNormalization). - conv_kernel_weight_decay: A float, the weight decay for convolution - kernels. - - Returns: - output: A [batch, length, total_value_depth] tensor. - - Raises: - ValueError: If none of the three similarities (use_query_rpe_similarity, - use_key_rpe_similarity, use_content_similarity) is used. - ValueError: If neither of value content or value rpe is retrieved. - ValueError: If self_attention_activation is not supported. - ValueError: If total_key_depth is not divisible by num_heads. - ValueError: If total_value_depth is not divisible by num_heads. - """ - # Validate the attention similarity choices. - if not any([ - use_content_similarity, use_key_rpe_similarity, use_query_rpe_similarity - ]): - raise ValueError( - 'Should use at least one similarity to compute attention.') - - # Validate the retrieve value choices. 
- if not retrieve_value_content and not retrieve_value_rpe: - raise ValueError('Should retrieve at least one of content or rpe.') - - if total_key_depth % num_heads: - raise ValueError('Total_key_depth should be divisible by num_heads.') - - if total_value_depth % num_heads: - raise ValueError('Total_value_depth should be divisible by num_heads.') - - super(AxialAttention, self).__init__(name=name) - self._query_shape = query_shape - self._memory_flange = memory_flange - self._total_key_depth = total_key_depth - self._total_value_depth = total_value_depth - self._num_heads = num_heads - self._use_query_rpe_similarity = use_query_rpe_similarity - self._use_key_rpe_similarity = use_key_rpe_similarity - self._use_content_similarity = use_content_similarity - self._retrieve_value_rpe = retrieve_value_rpe - self._retrieve_value_content = retrieve_value_content - self._initialization_std_for_query_key_rpe = ( - initialization_std_for_query_key_rpe) - self._initialization_std_for_value_rpe = initialization_std_for_value_rpe - self._self_attention_activation = self_attention_activation - self._conv_kernel_weight_decay = conv_kernel_weight_decay - - self._batch_norm_qkv = bn_layer(axis=-1, name='batch_norm_qkv') - self._batch_norm_similarity = bn_layer( - axis=[0, 2], name='batch_norm_similarity') - self._batch_norm_retrieved_output = bn_layer( - axis=[0, 2, 4], name='batch_norm_retrieved_output') - - self._key_depth_per_head = total_key_depth // num_heads - self._attention_activate_fn = activations.get_activation( - self_attention_activation) - - def build(self, input_shape): - """Builds axial-attention layer weights. - - Args: - input_shape: An integer list of length 3, the shape of the input tensor. - - Raises: - NotImplementedError: Local axial-attention has not been implemented. It is - triggered if query_shape is less than input_shape. - """ - - # Apply global attention if query_shape is larger than input_shape[1]. - if self._query_shape >= input_shape[1]: - self._query_shape = input_shape[1] - self._memory_flange = 0 - else: - raise NotImplementedError('Local axial attention has not been ' - 'implemented yet.') - self._memory_shape = self._query_shape + 2 * self._memory_flange - - # Compute query key value with one convolution and an optional batch norm. - # The initialization std is standard transformer initialization (without - # batch norm), as used in SASA and ViT. In our case, we use batch norm by - # default, so it does not require careful tuning. If one wants to remove - # all batch norms in axial attention, this standard initialization should - # still be good, but a more careful initialization is encouraged. 
- self.qkv_kernel = self.add_weight( - name='qkv_kernel', - shape=[input_shape[-1], - self._total_key_depth * 2 + self._total_value_depth], - initializer=tf.keras.initializers.TruncatedNormal( - stddev=input_shape[-1]**-0.5), - regularizer=tf.keras.regularizers.l2(self._conv_kernel_weight_decay)) - - if self._use_query_rpe_similarity: - self._query_rpe = positional_encodings.RelativePositionalEncoding( - self._query_shape, - self._memory_shape, - self._key_depth_per_head, - self._num_heads, - 'query_rpe', - initialization_std=self._initialization_std_for_query_key_rpe, - conv_kernel_weight_decay=self._conv_kernel_weight_decay) - - if self._use_key_rpe_similarity: - self._key_rpe = positional_encodings.RelativePositionalEncoding( - self._query_shape, - self._memory_shape, - self._key_depth_per_head, - self._num_heads, - 'key_rpe', - initialization_std=self._initialization_std_for_query_key_rpe, - conv_kernel_weight_decay=self._conv_kernel_weight_decay) - - if self._retrieve_value_rpe: - self._value_rpe = positional_encodings.RelativePositionalEncoding( - self._query_shape, - self._memory_shape, - self._total_value_depth // self._num_heads, - self._num_heads, - 'value_rpe', - initialization_std=self._initialization_std_for_value_rpe, - conv_kernel_weight_decay=self._conv_kernel_weight_decay) - - def call(self, input_tensor, training=False): - """Performs a forward pass. - - Args: - input_tensor: An input [batch, length, channel] tensor. - training: A boolean flag indicating whether training behavior should be - used (default: False). - - Returns: - output: An output [batch, length, total_value_depth] tensor. - """ - # Alternatively, the einsum can be implemented as a 1x1 convolution. - # However, it is not obvious which implementation is more efficient (without - # careful benchmarking), so we use einsum for its flexibility and - # consistency with other parts of the function. - query_key_value = tf.einsum( - 'nlc,cd->nld', input_tensor, self.qkv_kernel, name='compute_qkv') - query_key_value = self._batch_norm_qkv(query_key_value, training=training) - - # Split query key value. - query, key, value = tf.split( - query_key_value, - [self._total_key_depth, self._total_key_depth, self._total_value_depth], - axis=-1) - - # Reshape the query, key, and value. - query = tf.reshape(query, [-1, self._query_shape, self._num_heads, - self._key_depth_per_head]) - query = tf.transpose(a=query, perm=[0, 2, 1, 3]) - key = tf.reshape(key, [-1, np.prod(self._memory_shape), self._num_heads, - self._key_depth_per_head]) - key = tf.transpose(a=key, perm=[0, 2, 1, 3]) - value = tf.reshape(value, [-1, np.prod(self._memory_shape), self._num_heads, - self._total_value_depth // self._num_heads]) - - # Gather all similarity logits into a list. - similarity_logits = [] - - # Compute the content similarity term: q * k. - if self._use_content_similarity: - content_similarity = tf.einsum( - 'bhld,bhmd->bhlm', query, key, name='content_similarity') - similarity_logits.append(content_similarity) - - # Compute the query rpe similarity term: q * rpe. - if self._use_query_rpe_similarity: - query_rpe = self._query_rpe(None) - query_rpe_similarity = tf.einsum( - 'bhld,hlmd->bhlm', query, query_rpe, name='query_rpe_similarity') - similarity_logits.append(query_rpe_similarity) - - # Compute the key rpe similarity term: k * rpe. 
- if self._use_key_rpe_similarity: - key_rpe = self._key_rpe(None) - key_rpe_similarity = tf.einsum( - 'bhmd,hlmd->bhlm', key, key_rpe, name='key_rpe_similarity') - similarity_logits.append(key_rpe_similarity) - - # Apply an optional batch norm to the similarities and sum them. - similarity_logits = tf.stack(similarity_logits) - similarity_logits = self._batch_norm_similarity(similarity_logits, - training=training) - similarity_logits = tf.reduce_sum(input_tensor=similarity_logits, axis=0) - - # Apply an attention activation function, e.g. softmax. - weights = self._attention_activate_fn(similarity_logits) - - # Gather retrieved values or rpes into a list. - retrieve_list = [] - - # Retrieve the content of the attended value. - if self._retrieve_value_content: - retrieved_content = tf.einsum( - 'bhlm,bmhd->bhld', weights, value, name='retrieve_value_content') - retrieve_list.append(retrieved_content) - - # Retrieve the relative position of the attended value. - if self._retrieve_value_rpe: - value_rpe = self._value_rpe(None) - retrieved_rpe = tf.einsum( - 'bhlm,hlmd->bhld', weights, value_rpe, name='retrieve_value_rpe') - retrieve_list.append(retrieved_rpe) - - # Apply batch norms to retrieved contents and rpes respectively. - retrieved_output = tf.stack(retrieve_list) - retrieved_output = self._batch_norm_retrieved_output(retrieved_output, - training=training) - # Additive contents and rpes. - retrieved_output = tf.reduce_sum(input_tensor=retrieved_output, axis=0) - - # Combine the heads by transposing and reshaping the tensor. - retrieved_output = utils.transpose_and_reshape_for_attention_operation( - retrieved_output) - - return retrieved_output - - -class AxialAttention2D(tf.keras.layers.Layer): - """Sequentially applies height-axis and width-axis axial-attention.""" - - def __init__(self, - strides=1, - filters=512, - name='attention', - key_expansion=1, - value_expansion=2, - query_shape=(129, 129), - memory_flange=(32, 32), - **kwargs): - """Initializes an AxialAttention2D layer. - - Args: - strides: An integer, the stride for the output, usually 1 or 2. - filters: An integer, the base number of channels for the layer. - name: A string, the name of the attention layer. - key_expansion: A float, the channel expansion ratio for keys. - value_expansion: A float, the channel expansion ratio for values. - query_shape: An integer, the maximum query shape for both the height axis - and the width axis. - memory_flange: An integer list of length 2. The memory flange for the - height axis and the width axis. - **kwargs: A dictionary of keyword arguments passed to height-axis, - width-axis, and 2D global AxialAttention. - - Returns: - output: A [batch, strided height, strided width, output_channels] tensor. - """ - super(AxialAttention2D, self).__init__(name=name) - total_key_depth = int(round(filters * key_expansion)) - total_value_depth = int(round(filters * value_expansion)) - self._strides = strides - self._total_key_depth = total_key_depth - self._total_value_depth = total_value_depth - self._height_axis = AxialAttention( - total_key_depth=total_key_depth, - total_value_depth=total_value_depth, - query_shape=query_shape[0], - memory_flange=memory_flange[0], - name='height_axis', - **kwargs) - self._width_axis = AxialAttention( - total_key_depth=total_key_depth, - total_value_depth=total_value_depth, - query_shape=query_shape[1], - memory_flange=memory_flange[1], - name='width_axis', - **kwargs) - - def call(self, inputs, training=False): - """Performs a forward pass. 
- - Args: - inputs: An input [batch, height, width, channel] tensor. - training: A boolean flag indicating whether training behavior should be - used (default: False). - - Returns: - output: An output [batch, strided_height, strided_width, - filters * value_expansion] tensor. - """ - _, height, width, channel = inputs.get_shape().as_list() - - # Transpose and reshape the width axis to the batch dimension. - x = tf.transpose(a=inputs, perm=[0, 2, 1, 3]) - x = tf.reshape(x, [-1, height, channel]) - x = self._height_axis(x, training=training) - # Reshape and transpose back to a 4D tensor. - x = tf.reshape(x, [-1, width, height, self._total_value_depth]) - x = tf.transpose(a=x, perm=[0, 2, 1, 3]) - # Height axis striding. - if self._strides > 1: - x = x[:, ::self._strides, :, :] - - # Reshape the height axis to the batch dimension. - _, strided_height, _, _ = x.get_shape().as_list() - x = tf.reshape(x, [-1, width, self._total_value_depth]) - x = self._width_axis(x, training=training) - # Reshape back to a 4D tensor. - x = tf.reshape(x, [-1, strided_height, width, self._total_value_depth]) - # Width axis striding. - if self._strides > 1: - x = x[:, :, ::self._strides, :] - - return x - - -class GlobalAttention2D(tf.keras.layers.Layer): - """A 2D global attention layer.""" - - def __init__(self, - strides=1, - filters=512, - name='attention', - key_expansion=1, - value_expansion=2, - query_shape=(129, 129), - memory_flange=(32, 32), - double_global_attention=False, - **kwargs): - """Initializes a GlobalAttention2D layer. - - Args: - strides: An integer, the stride for the output, usually 1 or 2. - filters: An integer, the base number of channels for the layer. - name: A string, the name of the attention layer. - key_expansion: A float, the channel expansion ratio for keys. - value_expansion: A float, the channel expansion ratio for values. - query_shape: An integer, the maximum query shape for both the height axis - and the width axis. - memory_flange: An integer list of length 2. The memory flange for the - height axis and the width axis. - double_global_attention: A boolean, whether to use two global attention - layers. Two global attention layers match the parameter count to a - seqentially applied height and width axial attention layer. - **kwargs: A dictionary of keyword arguments passed to height-axis, - width-axis, and 2D global AxialAttention. - - Returns: - output: A [batch, strided height, strided width, output_channels] tensor. - - Raises: - ValueError: If relative positional encoding is enforced in kwargs. - """ - if any([kwargs.get('use_query_rpe_similarity', False), - kwargs.get('use_key_rpe_similarity', False), - kwargs.get('retrieve_value_rpe', False)]): - raise ValueError('GlobalAttention2D does not support relative positional ' - 'encodings.') - - super(GlobalAttention2D, self).__init__(name=name) - total_key_depth = int(round(filters * key_expansion)) - total_value_depth = int(round(filters * value_expansion)) - self._strides = strides - self._double_global_attention = double_global_attention - self._total_key_depth = total_key_depth - self._total_value_depth = total_value_depth - - # Global attention does not support relative positional encodings. 
- kwargs['use_query_rpe_similarity'] = False - kwargs['use_key_rpe_similarity'] = False - kwargs['retrieve_value_rpe'] = False - self._kwargs = kwargs - - def build(self, input_shape): - """Builds global attention layers according to the 4D input_shape.""" - _, height, width, _ = input_shape - # Implement 2D global attention as 1D axial-attention by flattening the 2D - # inputs into 1D. We also disable the relative positional encodings in - # axial attention, so that only content-based attention is used. The query - # shape is set to height * width, so that the axial attention is global. - self._global = AxialAttention( - total_key_depth=self._total_key_depth, - total_value_depth=self._total_value_depth, - query_shape=height*width, - memory_flange=0, - name='global', - **self._kwargs) - - # Use two global attention layers in one residual block. This option - # ensures that global attention models have similar number of layers and - # parameters as axial-attention models. - if self._double_global_attention: - self._global2 = AxialAttention( - total_key_depth=self._total_key_depth, - total_value_depth=self._total_value_depth, - query_shape=height*width, - memory_flange=0, - name='global2', - **self._kwargs) - - def call(self, inputs, training=False): - """Performs a forward pass. - - Args: - inputs: An input [batch, height, width, channel] tensor. - training: A boolean flag indicating whether training behavior should be - used (default: False). - - Returns: - output: An output [batch, strided_height, strided_width, - filters * value_expansion] tensor. - """ - _, height, width, channel = inputs.get_shape().as_list() - - # Reshape the inputs so that the attention is global 2D. - x = tf.reshape(inputs, [-1, height * width, channel]) - - # Implement 2D global attention as 1D axial-attention by flattening the 2D - # inputs into 1D. We also disable the relative positional encodings in - # axial attention, so that only content-based attention is used. - x = self._global(x, training=training) - - # Use two global attention layers in one residual block. This option - # ensures that global attention models have the same number of layers and - # parameters as axial-attention models. - if self._double_global_attention: - x = self._global2(x, training=training) - x = tf.reshape(x, [-1, height, width, self._total_value_depth]) - if self._strides > 1: - x = x[:, ::self._strides, ::self._strides, :] - - return x diff --git a/spaces/akhaliq/stylegan3_clip/calc_metrics.py b/spaces/akhaliq/stylegan3_clip/calc_metrics.py deleted file mode 100644 index 74a398a407f56e749e3a88eb9d8ff976191758f4..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/stylegan3_clip/calc_metrics.py +++ /dev/null @@ -1,188 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. 
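# A standalone sketch, with made-up shapes, of the flatten-attend-reshape
# pattern that GlobalAttention2D above relies on: 2D global attention is run
# as 1D attention over height*width tokens, then the 4D shape is restored and
# the output is strided by plain slicing.
import tensorflow as tf

batch, height, width, channels, strides = 2, 8, 8, 16, 2
inputs = tf.random.normal([batch, height, width, channels])

tokens = tf.reshape(inputs, [-1, height * width, channels])    # [B, H*W, C]
attended = tokens                                              # stand-in for the 1D attention layer
outputs = tf.reshape(attended, [-1, height, width, channels])  # back to [B, H, W, C]
outputs = outputs[:, ::strides, ::strides, :]                  # stride 2 -> [B, H//2, W//2, C]
print(outputs.shape)  # (2, 4, 4, 16)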
- -"""Calculate quality metrics for previous training run or pretrained network pickle.""" - -import os -import click -import json -import tempfile -import copy -import torch - -import dnnlib -import legacy -from metrics import metric_main -from metrics import metric_utils -from torch_utils import training_stats -from torch_utils import custom_ops -from torch_utils import misc -from torch_utils.ops import conv2d_gradfix - -#---------------------------------------------------------------------------- - -def subprocess_fn(rank, args, temp_dir): - dnnlib.util.Logger(should_flush=True) - - # Init torch.distributed. - if args.num_gpus > 1: - init_file = os.path.abspath(os.path.join(temp_dir, '.torch_distributed_init')) - if os.name == 'nt': - init_method = 'file:///' + init_file.replace('\\', '/') - torch.distributed.init_process_group(backend='gloo', init_method=init_method, rank=rank, world_size=args.num_gpus) - else: - init_method = f'file://{init_file}' - torch.distributed.init_process_group(backend='nccl', init_method=init_method, rank=rank, world_size=args.num_gpus) - - # Init torch_utils. - sync_device = torch.device('cuda', rank) if args.num_gpus > 1 else None - training_stats.init_multiprocessing(rank=rank, sync_device=sync_device) - if rank != 0 or not args.verbose: - custom_ops.verbosity = 'none' - - # Configure torch. - device = torch.device('cuda', rank) - torch.backends.cuda.matmul.allow_tf32 = False - torch.backends.cudnn.allow_tf32 = False - conv2d_gradfix.enabled = True - - # Print network summary. - G = copy.deepcopy(args.G).eval().requires_grad_(False).to(device) - if rank == 0 and args.verbose: - z = torch.empty([1, G.z_dim], device=device) - c = torch.empty([1, G.c_dim], device=device) - misc.print_module_summary(G, [z, c]) - - # Calculate each metric. - for metric in args.metrics: - if rank == 0 and args.verbose: - print(f'Calculating {metric}...') - progress = metric_utils.ProgressMonitor(verbose=args.verbose) - result_dict = metric_main.calc_metric(metric=metric, G=G, dataset_kwargs=args.dataset_kwargs, - num_gpus=args.num_gpus, rank=rank, device=device, progress=progress) - if rank == 0: - metric_main.report_metric(result_dict, run_dir=args.run_dir, snapshot_pkl=args.network_pkl) - if rank == 0 and args.verbose: - print() - - # Done. 
- if rank == 0 and args.verbose: - print('Exiting...') - -#---------------------------------------------------------------------------- - -def parse_comma_separated_list(s): - if isinstance(s, list): - return s - if s is None or s.lower() == 'none' or s == '': - return [] - return s.split(',') - -#---------------------------------------------------------------------------- - -@click.command() -@click.pass_context -@click.option('network_pkl', '--network', help='Network pickle filename or URL', metavar='PATH', required=True) -@click.option('--metrics', help='Quality metrics', metavar='[NAME|A,B,C|none]', type=parse_comma_separated_list, default='fid50k_full', show_default=True) -@click.option('--data', help='Dataset to evaluate against [default: look up]', metavar='[ZIP|DIR]') -@click.option('--mirror', help='Enable dataset x-flips [default: look up]', type=bool, metavar='BOOL') -@click.option('--gpus', help='Number of GPUs to use', type=int, default=1, metavar='INT', show_default=True) -@click.option('--verbose', help='Print optional information', type=bool, default=True, metavar='BOOL', show_default=True) - -def calc_metrics(ctx, network_pkl, metrics, data, mirror, gpus, verbose): - """Calculate quality metrics for previous training run or pretrained network pickle. - - Examples: - - \b - # Previous training run: look up options automatically, save result to JSONL file. - python calc_metrics.py --metrics=eqt50k_int,eqr50k \\ - --network=~/training-runs/00000-stylegan3-r-mydataset/network-snapshot-000000.pkl - - \b - # Pre-trained network pickle: specify dataset explicitly, print result to stdout. - python calc_metrics.py --metrics=fid50k_full --data=~/datasets/ffhq-1024x1024.zip --mirror=1 \\ - --network=https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-t-ffhq-1024x1024.pkl - - \b - Recommended metrics: - fid50k_full Frechet inception distance against the full dataset. - kid50k_full Kernel inception distance against the full dataset. - pr50k3_full Precision and recall againt the full dataset. - ppl2_wend Perceptual path length in W, endpoints, full image. - eqt50k_int Equivariance w.r.t. integer translation (EQ-T). - eqt50k_frac Equivariance w.r.t. fractional translation (EQ-T_frac). - eqr50k Equivariance w.r.t. rotation (EQ-R). - - \b - Legacy metrics: - fid50k Frechet inception distance against 50k real images. - kid50k Kernel inception distance against 50k real images. - pr50k3 Precision and recall against 50k real images. - is50k Inception score for CIFAR-10. - """ - dnnlib.util.Logger(should_flush=True) - - # Validate arguments. - args = dnnlib.EasyDict(metrics=metrics, num_gpus=gpus, network_pkl=network_pkl, verbose=verbose) - if not all(metric_main.is_valid_metric(metric) for metric in args.metrics): - ctx.fail('\n'.join(['--metrics can only contain the following values:'] + metric_main.list_valid_metrics())) - if not args.num_gpus >= 1: - ctx.fail('--gpus must be at least 1') - - # Load network. - if not dnnlib.util.is_url(network_pkl, allow_file_urls=True) and not os.path.isfile(network_pkl): - ctx.fail('--network must point to a file or URL') - if args.verbose: - print(f'Loading network from "{network_pkl}"...') - with dnnlib.util.open_url(network_pkl, verbose=args.verbose) as f: - network_dict = legacy.load_network_pkl(f) - args.G = network_dict['G_ema'] # subclass of torch.nn.Module - - # Initialize dataset options. 
- if data is not None: - args.dataset_kwargs = dnnlib.EasyDict(class_name='training.dataset.ImageFolderDataset', path=data) - elif network_dict['training_set_kwargs'] is not None: - args.dataset_kwargs = dnnlib.EasyDict(network_dict['training_set_kwargs']) - else: - ctx.fail('Could not look up dataset options; please specify --data') - - # Finalize dataset options. - args.dataset_kwargs.resolution = args.G.img_resolution - args.dataset_kwargs.use_labels = (args.G.c_dim != 0) - if mirror is not None: - args.dataset_kwargs.xflip = mirror - - # Print dataset options. - if args.verbose: - print('Dataset options:') - print(json.dumps(args.dataset_kwargs, indent=2)) - - # Locate run dir. - args.run_dir = None - if os.path.isfile(network_pkl): - pkl_dir = os.path.dirname(network_pkl) - if os.path.isfile(os.path.join(pkl_dir, 'training_options.json')): - args.run_dir = pkl_dir - - # Launch processes. - if args.verbose: - print('Launching processes...') - torch.multiprocessing.set_start_method('spawn') - with tempfile.TemporaryDirectory() as temp_dir: - if args.num_gpus == 1: - subprocess_fn(rank=0, args=args, temp_dir=temp_dir) - else: - torch.multiprocessing.spawn(fn=subprocess_fn, args=(args, temp_dir), nprocs=args.num_gpus) - -#---------------------------------------------------------------------------- - -if __name__ == "__main__": - calc_metrics() # pylint: disable=no-value-for-parameter - -#---------------------------------------------------------------------------- diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/commands/hash.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/commands/hash.py deleted file mode 100644 index 042dac813e74b8187c3754cb9a937c7f7183e331..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/commands/hash.py +++ /dev/null @@ -1,59 +0,0 @@ -import hashlib -import logging -import sys -from optparse import Values -from typing import List - -from pip._internal.cli.base_command import Command -from pip._internal.cli.status_codes import ERROR, SUCCESS -from pip._internal.utils.hashes import FAVORITE_HASH, STRONG_HASHES -from pip._internal.utils.misc import read_chunks, write_output - -logger = logging.getLogger(__name__) - - -class HashCommand(Command): - """ - Compute a hash of a local package archive. - - These can be used with --hash in a requirements file to do repeatable - installs. - """ - - usage = "%prog [options] <file> ..." 
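# For illustration only: how the digest printed by `pip hash` is typically fed
# back into a requirements file for hash-checked installs. The file name and
# digest below are placeholders, not real values.
#
#   $ pip hash --algorithm sha256 example-1.0.tar.gz
#   example-1.0.tar.gz:
#   --hash=sha256:<64-character-hex-digest>
#
#   # requirements.txt
#   example==1.0 --hash=sha256:<64-character-hex-digest>
#
#   $ pip install --require-hashes -r requirements.txt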
- ignore_require_venv = True - - def add_options(self) -> None: - self.cmd_opts.add_option( - "-a", - "--algorithm", - dest="algorithm", - choices=STRONG_HASHES, - action="store", - default=FAVORITE_HASH, - help="The hash algorithm to use: one of {}".format( - ", ".join(STRONG_HASHES) - ), - ) - self.parser.insert_option_group(0, self.cmd_opts) - - def run(self, options: Values, args: List[str]) -> int: - if not args: - self.parser.print_usage(sys.stderr) - return ERROR - - algorithm = options.algorithm - for path in args: - write_output( - "%s:\n--hash=%s:%s", path, algorithm, _hash_of_file(path, algorithm) - ) - return SUCCESS - - -def _hash_of_file(path: str, algorithm: str) -> str: - """Return the hash digest of a file.""" - with open(path, "rb") as archive: - hash = hashlib.new(algorithm) - for chunk in read_chunks(archive): - hash.update(chunk) - return hash.hexdigest() diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/chardet/__init__.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/chardet/__init__.py deleted file mode 100644 index 80ad2546d7981394b5f5d221336c9f00236b9d66..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/chardet/__init__.py +++ /dev/null @@ -1,83 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - - -from .universaldetector import UniversalDetector -from .enums import InputState -from .version import __version__, VERSION - - -__all__ = ['UniversalDetector', 'detect', 'detect_all', '__version__', 'VERSION'] - - -def detect(byte_str): - """ - Detect the encoding of the given byte string. - - :param byte_str: The byte sequence to examine. - :type byte_str: ``bytes`` or ``bytearray`` - """ - if not isinstance(byte_str, bytearray): - if not isinstance(byte_str, bytes): - raise TypeError('Expected object of type bytes or bytearray, got: ' - '{}'.format(type(byte_str))) - else: - byte_str = bytearray(byte_str) - detector = UniversalDetector() - detector.feed(byte_str) - return detector.close() - - -def detect_all(byte_str): - """ - Detect all the possible encodings of the given byte string. - - :param byte_str: The byte sequence to examine. 
- :type byte_str: ``bytes`` or ``bytearray`` - """ - if not isinstance(byte_str, bytearray): - if not isinstance(byte_str, bytes): - raise TypeError('Expected object of type bytes or bytearray, got: ' - '{}'.format(type(byte_str))) - else: - byte_str = bytearray(byte_str) - - detector = UniversalDetector() - detector.feed(byte_str) - detector.close() - - if detector._input_state == InputState.HIGH_BYTE: - results = [] - for prober in detector._charset_probers: - if prober.get_confidence() > detector.MINIMUM_THRESHOLD: - charset_name = prober.charset_name - lower_charset_name = prober.charset_name.lower() - # Use Windows encoding name instead of ISO-8859 if we saw any - # extra Windows-specific bytes - if lower_charset_name.startswith('iso-8859'): - if detector._has_win_bytes: - charset_name = detector.ISO_WIN_MAP.get(lower_charset_name, - charset_name) - results.append({ - 'encoding': charset_name, - 'confidence': prober.get_confidence(), - 'language': prober.language, - }) - if len(results) > 0: - return sorted(results, key=lambda result: -result['confidence']) - - return [detector.result] diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/rich/color.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/rich/color.py deleted file mode 100644 index f0fa026d64687dea1ddfa061ca5875578eb45db2..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/rich/color.py +++ /dev/null @@ -1,581 +0,0 @@ -import platform -import re -from colorsys import rgb_to_hls -from enum import IntEnum -from functools import lru_cache -from typing import TYPE_CHECKING, NamedTuple, Optional, Tuple - -from ._palettes import EIGHT_BIT_PALETTE, STANDARD_PALETTE, WINDOWS_PALETTE -from .color_triplet import ColorTriplet -from .repr import rich_repr, Result -from .terminal_theme import DEFAULT_TERMINAL_THEME - -if TYPE_CHECKING: # pragma: no cover - from .terminal_theme import TerminalTheme - from .text import Text - - -WINDOWS = platform.system() == "Windows" - - -class ColorSystem(IntEnum): - """One of the 3 color system supported by terminals.""" - - STANDARD = 1 - EIGHT_BIT = 2 - TRUECOLOR = 3 - WINDOWS = 4 - - def __repr__(self) -> str: - return f"ColorSystem.{self.name}" - - -class ColorType(IntEnum): - """Type of color stored in Color class.""" - - DEFAULT = 0 - STANDARD = 1 - EIGHT_BIT = 2 - TRUECOLOR = 3 - WINDOWS = 4 - - def __repr__(self) -> str: - return f"ColorType.{self.name}" - - -ANSI_COLOR_NAMES = { - "black": 0, - "red": 1, - "green": 2, - "yellow": 3, - "blue": 4, - "magenta": 5, - "cyan": 6, - "white": 7, - "bright_black": 8, - "bright_red": 9, - "bright_green": 10, - "bright_yellow": 11, - "bright_blue": 12, - "bright_magenta": 13, - "bright_cyan": 14, - "bright_white": 15, - "grey0": 16, - "navy_blue": 17, - "dark_blue": 18, - "blue3": 20, - "blue1": 21, - "dark_green": 22, - "deep_sky_blue4": 25, - "dodger_blue3": 26, - "dodger_blue2": 27, - "green4": 28, - "spring_green4": 29, - "turquoise4": 30, - "deep_sky_blue3": 32, - "dodger_blue1": 33, - "green3": 40, - "spring_green3": 41, - "dark_cyan": 36, - "light_sea_green": 37, - "deep_sky_blue2": 38, - "deep_sky_blue1": 39, - "spring_green2": 47, - "cyan3": 43, - "dark_turquoise": 44, - "turquoise2": 45, - "green1": 46, - "spring_green1": 48, - "medium_spring_green": 49, - "cyan2": 50, - "cyan1": 51, - "dark_red": 88, - "deep_pink4": 125, - "purple4": 55, - "purple3": 56, - "blue_violet": 57, - "orange4": 94, - "grey37": 59, - 
"medium_purple4": 60, - "slate_blue3": 62, - "royal_blue1": 63, - "chartreuse4": 64, - "dark_sea_green4": 71, - "pale_turquoise4": 66, - "steel_blue": 67, - "steel_blue3": 68, - "cornflower_blue": 69, - "chartreuse3": 76, - "cadet_blue": 73, - "sky_blue3": 74, - "steel_blue1": 81, - "pale_green3": 114, - "sea_green3": 78, - "aquamarine3": 79, - "medium_turquoise": 80, - "chartreuse2": 112, - "sea_green2": 83, - "sea_green1": 85, - "aquamarine1": 122, - "dark_slate_gray2": 87, - "dark_magenta": 91, - "dark_violet": 128, - "purple": 129, - "light_pink4": 95, - "plum4": 96, - "medium_purple3": 98, - "slate_blue1": 99, - "yellow4": 106, - "wheat4": 101, - "grey53": 102, - "light_slate_grey": 103, - "medium_purple": 104, - "light_slate_blue": 105, - "dark_olive_green3": 149, - "dark_sea_green": 108, - "light_sky_blue3": 110, - "sky_blue2": 111, - "dark_sea_green3": 150, - "dark_slate_gray3": 116, - "sky_blue1": 117, - "chartreuse1": 118, - "light_green": 120, - "pale_green1": 156, - "dark_slate_gray1": 123, - "red3": 160, - "medium_violet_red": 126, - "magenta3": 164, - "dark_orange3": 166, - "indian_red": 167, - "hot_pink3": 168, - "medium_orchid3": 133, - "medium_orchid": 134, - "medium_purple2": 140, - "dark_goldenrod": 136, - "light_salmon3": 173, - "rosy_brown": 138, - "grey63": 139, - "medium_purple1": 141, - "gold3": 178, - "dark_khaki": 143, - "navajo_white3": 144, - "grey69": 145, - "light_steel_blue3": 146, - "light_steel_blue": 147, - "yellow3": 184, - "dark_sea_green2": 157, - "light_cyan3": 152, - "light_sky_blue1": 153, - "green_yellow": 154, - "dark_olive_green2": 155, - "dark_sea_green1": 193, - "pale_turquoise1": 159, - "deep_pink3": 162, - "magenta2": 200, - "hot_pink2": 169, - "orchid": 170, - "medium_orchid1": 207, - "orange3": 172, - "light_pink3": 174, - "pink3": 175, - "plum3": 176, - "violet": 177, - "light_goldenrod3": 179, - "tan": 180, - "misty_rose3": 181, - "thistle3": 182, - "plum2": 183, - "khaki3": 185, - "light_goldenrod2": 222, - "light_yellow3": 187, - "grey84": 188, - "light_steel_blue1": 189, - "yellow2": 190, - "dark_olive_green1": 192, - "honeydew2": 194, - "light_cyan1": 195, - "red1": 196, - "deep_pink2": 197, - "deep_pink1": 199, - "magenta1": 201, - "orange_red1": 202, - "indian_red1": 204, - "hot_pink": 206, - "dark_orange": 208, - "salmon1": 209, - "light_coral": 210, - "pale_violet_red1": 211, - "orchid2": 212, - "orchid1": 213, - "orange1": 214, - "sandy_brown": 215, - "light_salmon1": 216, - "light_pink1": 217, - "pink1": 218, - "plum1": 219, - "gold1": 220, - "navajo_white1": 223, - "misty_rose1": 224, - "thistle1": 225, - "yellow1": 226, - "light_goldenrod1": 227, - "khaki1": 228, - "wheat1": 229, - "cornsilk1": 230, - "grey100": 231, - "grey3": 232, - "grey7": 233, - "grey11": 234, - "grey15": 235, - "grey19": 236, - "grey23": 237, - "grey27": 238, - "grey30": 239, - "grey35": 240, - "grey39": 241, - "grey42": 242, - "grey46": 243, - "grey50": 244, - "grey54": 245, - "grey58": 246, - "grey62": 247, - "grey66": 248, - "grey70": 249, - "grey74": 250, - "grey78": 251, - "grey82": 252, - "grey85": 253, - "grey89": 254, - "grey93": 255, -} - - -class ColorParseError(Exception): - """The color could not be parsed.""" - - -RE_COLOR = re.compile( - r"""^ -\#([0-9a-f]{6})$| -color\(([0-9]{1,3})\)$| -rgb\(([\d\s,]+)\)$ -""", - re.VERBOSE, -) - - -@rich_repr -class Color(NamedTuple): - """Terminal color definition.""" - - name: str - """The name of the color (typically the input to Color.parse).""" - type: ColorType - """The type of the color.""" - 
number: Optional[int] = None - """The color number, if a standard color, or None.""" - triplet: Optional[ColorTriplet] = None - """A triplet of color components, if an RGB color.""" - - def __rich__(self) -> "Text": - """Dispays the actual color if Rich printed.""" - from .text import Text - from .style import Style - - return Text.assemble( - f"<color {self.name!r} ({self.type.name.lower()})", - ("⬤", Style(color=self)), - " >", - ) - - def __rich_repr__(self) -> Result: - yield self.name - yield self.type - yield "number", self.number, None - yield "triplet", self.triplet, None - - @property - def system(self) -> ColorSystem: - """Get the native color system for this color.""" - if self.type == ColorType.DEFAULT: - return ColorSystem.STANDARD - return ColorSystem(int(self.type)) - - @property - def is_system_defined(self) -> bool: - """Check if the color is ultimately defined by the system.""" - return self.system not in (ColorSystem.EIGHT_BIT, ColorSystem.TRUECOLOR) - - @property - def is_default(self) -> bool: - """Check if the color is a default color.""" - return self.type == ColorType.DEFAULT - - def get_truecolor( - self, theme: Optional["TerminalTheme"] = None, foreground: bool = True - ) -> ColorTriplet: - """Get an equivalent color triplet for this color. - - Args: - theme (TerminalTheme, optional): Optional terminal theme, or None to use default. Defaults to None. - foreground (bool, optional): True for a foreground color, or False for background. Defaults to True. - - Returns: - ColorTriplet: A color triplet containing RGB components. - """ - - if theme is None: - theme = DEFAULT_TERMINAL_THEME - if self.type == ColorType.TRUECOLOR: - assert self.triplet is not None - return self.triplet - elif self.type == ColorType.EIGHT_BIT: - assert self.number is not None - return EIGHT_BIT_PALETTE[self.number] - elif self.type == ColorType.STANDARD: - assert self.number is not None - return theme.ansi_colors[self.number] - elif self.type == ColorType.WINDOWS: - assert self.number is not None - return WINDOWS_PALETTE[self.number] - else: # self.type == ColorType.DEFAULT: - assert self.number is None - return theme.foreground_color if foreground else theme.background_color - - @classmethod - def from_ansi(cls, number: int) -> "Color": - """Create a Color number from it's 8-bit ansi number. - - Args: - number (int): A number between 0-255 inclusive. - - Returns: - Color: A new Color instance. - """ - return cls( - name=f"color({number})", - type=(ColorType.STANDARD if number < 16 else ColorType.EIGHT_BIT), - number=number, - ) - - @classmethod - def from_triplet(cls, triplet: "ColorTriplet") -> "Color": - """Create a truecolor RGB color from a triplet of values. - - Args: - triplet (ColorTriplet): A color triplet containing red, green and blue components. - - Returns: - Color: A new color object. - """ - return cls(name=triplet.hex, type=ColorType.TRUECOLOR, triplet=triplet) - - @classmethod - def from_rgb(cls, red: float, green: float, blue: float) -> "Color": - """Create a truecolor from three color components in the range(0->255). - - Args: - red (float): Red component in range 0-255. - green (float): Green component in range 0-255. - blue (float): Blue component in range 0-255. - - Returns: - Color: A new color object. - """ - return cls.from_triplet(ColorTriplet(int(red), int(green), int(blue))) - - @classmethod - def default(cls) -> "Color": - """Get a Color instance representing the default color. - - Returns: - Color: Default color. 
- """ - return cls(name="default", type=ColorType.DEFAULT) - - @classmethod - @lru_cache(maxsize=1024) - def parse(cls, color: str) -> "Color": - """Parse a color definition.""" - original_color = color - color = color.lower().strip() - - if color == "default": - return cls(color, type=ColorType.DEFAULT) - - color_number = ANSI_COLOR_NAMES.get(color) - if color_number is not None: - return cls( - color, - type=(ColorType.STANDARD if color_number < 16 else ColorType.EIGHT_BIT), - number=color_number, - ) - - color_match = RE_COLOR.match(color) - if color_match is None: - raise ColorParseError(f"{original_color!r} is not a valid color") - - color_24, color_8, color_rgb = color_match.groups() - if color_24: - triplet = ColorTriplet( - int(color_24[0:2], 16), int(color_24[2:4], 16), int(color_24[4:6], 16) - ) - return cls(color, ColorType.TRUECOLOR, triplet=triplet) - - elif color_8: - number = int(color_8) - if number > 255: - raise ColorParseError(f"color number must be <= 255 in {color!r}") - return cls( - color, - type=(ColorType.STANDARD if number < 16 else ColorType.EIGHT_BIT), - number=number, - ) - - else: # color_rgb: - components = color_rgb.split(",") - if len(components) != 3: - raise ColorParseError( - f"expected three components in {original_color!r}" - ) - red, green, blue = components - triplet = ColorTriplet(int(red), int(green), int(blue)) - if not all(component <= 255 for component in triplet): - raise ColorParseError( - f"color components must be <= 255 in {original_color!r}" - ) - return cls(color, ColorType.TRUECOLOR, triplet=triplet) - - @lru_cache(maxsize=1024) - def get_ansi_codes(self, foreground: bool = True) -> Tuple[str, ...]: - """Get the ANSI escape codes for this color.""" - _type = self.type - if _type == ColorType.DEFAULT: - return ("39" if foreground else "49",) - - elif _type == ColorType.WINDOWS: - number = self.number - assert number is not None - fore, back = (30, 40) if number < 8 else (82, 92) - return (str(fore + number if foreground else back + number),) - - elif _type == ColorType.STANDARD: - number = self.number - assert number is not None - fore, back = (30, 40) if number < 8 else (82, 92) - return (str(fore + number if foreground else back + number),) - - elif _type == ColorType.EIGHT_BIT: - assert self.number is not None - return ("38" if foreground else "48", "5", str(self.number)) - - else: # self.standard == ColorStandard.TRUECOLOR: - assert self.triplet is not None - red, green, blue = self.triplet - return ("38" if foreground else "48", "2", str(red), str(green), str(blue)) - - @lru_cache(maxsize=1024) - def downgrade(self, system: ColorSystem) -> "Color": - """Downgrade a color system to a system with fewer colors.""" - - if self.type in [ColorType.DEFAULT, system]: - return self - # Convert to 8-bit color from truecolor color - if system == ColorSystem.EIGHT_BIT and self.system == ColorSystem.TRUECOLOR: - assert self.triplet is not None - red, green, blue = self.triplet.normalized - _h, l, s = rgb_to_hls(red, green, blue) - # If saturation is under 10% assume it is grayscale - if s < 0.1: - gray = round(l * 25.0) - if gray == 0: - color_number = 16 - elif gray == 25: - color_number = 231 - else: - color_number = 231 + gray - return Color(self.name, ColorType.EIGHT_BIT, number=color_number) - - color_number = ( - 16 + 36 * round(red * 5.0) + 6 * round(green * 5.0) + round(blue * 5.0) - ) - return Color(self.name, ColorType.EIGHT_BIT, number=color_number) - - # Convert to standard from truecolor or 8-bit - elif system == 
ColorSystem.STANDARD: - if self.system == ColorSystem.TRUECOLOR: - assert self.triplet is not None - triplet = self.triplet - else: # self.system == ColorSystem.EIGHT_BIT - assert self.number is not None - triplet = ColorTriplet(*EIGHT_BIT_PALETTE[self.number]) - - color_number = STANDARD_PALETTE.match(triplet) - return Color(self.name, ColorType.STANDARD, number=color_number) - - elif system == ColorSystem.WINDOWS: - if self.system == ColorSystem.TRUECOLOR: - assert self.triplet is not None - triplet = self.triplet - else: # self.system == ColorSystem.EIGHT_BIT - assert self.number is not None - if self.number < 16: - return Color(self.name, ColorType.WINDOWS, number=self.number) - triplet = ColorTriplet(*EIGHT_BIT_PALETTE[self.number]) - - color_number = WINDOWS_PALETTE.match(triplet) - return Color(self.name, ColorType.WINDOWS, number=color_number) - - return self - - -def parse_rgb_hex(hex_color: str) -> ColorTriplet: - """Parse six hex characters in to RGB triplet.""" - assert len(hex_color) == 6, "must be 6 characters" - color = ColorTriplet( - int(hex_color[0:2], 16), int(hex_color[2:4], 16), int(hex_color[4:6], 16) - ) - return color - - -def blend_rgb( - color1: ColorTriplet, color2: ColorTriplet, cross_fade: float = 0.5 -) -> ColorTriplet: - """Blend one RGB color in to another.""" - r1, g1, b1 = color1 - r2, g2, b2 = color2 - new_color = ColorTriplet( - int(r1 + (r2 - r1) * cross_fade), - int(g1 + (g2 - g1) * cross_fade), - int(b1 + (b2 - b1) * cross_fade), - ) - return new_color - - -if __name__ == "__main__": # pragma: no cover - - from .console import Console - from .table import Table - from .text import Text - - console = Console() - - table = Table(show_footer=False, show_edge=True) - table.add_column("Color", width=10, overflow="ellipsis") - table.add_column("Number", justify="right", style="yellow") - table.add_column("Name", style="green") - table.add_column("Hex", style="blue") - table.add_column("RGB", style="magenta") - - colors = sorted((v, k) for k, v in ANSI_COLOR_NAMES.items()) - for color_number, name in colors: - color_cell = Text(" " * 10, style=f"on {name}") - if color_number < 16: - table.add_row(color_cell, f"{color_number}", Text(f'"{name}"')) - else: - color = EIGHT_BIT_PALETTE[color_number] # type: ignore - table.add_row( - color_cell, str(color_number), Text(f'"{name}"'), color.hex, color.rgb - ) - - console.print(table) diff --git a/spaces/algomuffin/jojo_fork/e4e/configs/paths_config.py b/spaces/algomuffin/jojo_fork/e4e/configs/paths_config.py deleted file mode 100644 index 4604f6063b8125364a52a492de52fcc54004f373..0000000000000000000000000000000000000000 --- a/spaces/algomuffin/jojo_fork/e4e/configs/paths_config.py +++ /dev/null @@ -1,28 +0,0 @@ -dataset_paths = { - # Face Datasets (In the paper: FFHQ - train, CelebAHQ - test) - 'ffhq': '', - 'celeba_test': '', - - # Cars Dataset (In the paper: Stanford cars) - 'cars_train': '', - 'cars_test': '', - - # Horse Dataset (In the paper: LSUN Horse) - 'horse_train': '', - 'horse_test': '', - - # Church Dataset (In the paper: LSUN Church) - 'church_train': '', - 'church_test': '', - - # Cats Dataset (In the paper: LSUN Cat) - 'cats_train': '', - 'cats_test': '' -} - -model_paths = { - 'stylegan_ffhq': 'pretrained_models/stylegan2-ffhq-config-f.pt', - 'ir_se50': 'pretrained_models/model_ir_se50.pth', - 'shape_predictor': 'pretrained_models/shape_predictor_68_face_landmarks.dat', - 'moco': 'pretrained_models/moco_v2_800ep_pretrain.pth' -} diff --git a/spaces/ali-ghamdan/deoldify/fastai/callbacks/__init__.py 
b/spaces/ali-ghamdan/deoldify/fastai/callbacks/__init__.py deleted file mode 100644 index f3032b58f89f954496b10722efcfdd539b5a6725..0000000000000000000000000000000000000000 --- a/spaces/ali-ghamdan/deoldify/fastai/callbacks/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -from .lr_finder import * -from .one_cycle import * -from .fp16 import * -from .general_sched import * -from .hooks import * -from .mixup import * -from .rnn import * -from .tracker import * -from .csv_logger import * -from .loss_metrics import * -from .oversampling import * diff --git a/spaces/aliabd/SummerTime/model/third_party/HMNet/ThirdParty/ROUGE/ROUGE-1.5.5/XML/DOM/NodeList.pod b/spaces/aliabd/SummerTime/model/third_party/HMNet/ThirdParty/ROUGE/ROUGE-1.5.5/XML/DOM/NodeList.pod deleted file mode 100644 index 1767c5b6a0100851ffe94296eeb2e5dffbf6b70d..0000000000000000000000000000000000000000 --- a/spaces/aliabd/SummerTime/model/third_party/HMNet/ThirdParty/ROUGE/ROUGE-1.5.5/XML/DOM/NodeList.pod +++ /dev/null @@ -1,46 +0,0 @@ -=head1 NAME - -XML::DOM::NodeList - A node list as used by XML::DOM - -=head1 DESCRIPTION - -The NodeList interface provides the abstraction of an ordered -collection of nodes, without defining or constraining how this -collection is implemented. - -The items in the NodeList are accessible via an integral index, -starting from 0. - -Although the DOM spec states that all NodeLists are "live" in that they -allways reflect changes to the DOM tree, the NodeList returned by -getElementsByTagName is not live in this implementation. See L<CAVEATS> -for details. - -=head2 METHODS - -=over 4 - -=item item (index) - -Returns the indexth item in the collection. If index is -greater than or equal to the number of nodes in the list, -this returns undef. - -=item getLength - -The number of nodes in the list. The range of valid child -node indices is 0 to length-1 inclusive. - -=back - -=head2 Additional methods not in the DOM Spec - -=over 4 - -=item dispose - -Removes all circular references in this NodeList and its descendants so the -objects can be claimed for garbage collection. The objects should not be used -afterwards. - -=back diff --git a/spaces/apratap5/Abhay-ASRLiveSpeechRecognition-ZR/app.py b/spaces/apratap5/Abhay-ASRLiveSpeechRecognition-ZR/app.py deleted file mode 100644 index 140f6f0a04ec368cd560dcc02026a6e8a2b54725..0000000000000000000000000000000000000000 --- a/spaces/apratap5/Abhay-ASRLiveSpeechRecognition-ZR/app.py +++ /dev/null @@ -1,168 +0,0 @@ -import gradio as gr -import torch -import time -import librosa -import soundfile -import nemo.collections.asr as nemo_asr -import tempfile -import os -import uuid - -from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration -import torch - -# PersistDataset ----- -import os -import csv -import gradio as gr -from gradio import inputs, outputs -import huggingface_hub -from huggingface_hub import Repository, hf_hub_download, upload_file -from datetime import datetime - -# --------------------------------------------- -# Dataset and Token links - change awacke1 to your own HF id, and add a HF_TOKEN copy to your repo for write permissions -# This should allow you to save your results to your own Dataset hosted on HF. 
--- -#DATASET_REPO_URL = "https://huggingface.co/datasets/awacke1/Carddata.csv" -#DATASET_REPO_ID = "awacke1/Carddata.csv" -#DATA_FILENAME = "Carddata.csv" -#DATA_FILE = os.path.join("data", DATA_FILENAME) -#HF_TOKEN = os.environ.get("HF_TOKEN") -#SCRIPT = """ - -#<script> -#if (!window.hasBeenRun) { -# window.hasBeenRun = true; -# console.log("should only happen once"); -# document.querySelector("button.submit").click(); -#} -#</script> -#""" - -#try: -# hf_hub_download( -# repo_id=DATASET_REPO_ID, -# filename=DATA_FILENAME, -# cache_dir=DATA_DIRNAME, -# force_filename=DATA_FILENAME -# ) -#except: -# print("file not found") -#repo = Repository( -# local_dir="data", clone_from=DATASET_REPO_URL, use_auth_token=HF_TOKEN -#) - -#def store_message(name: str, message: str): -# if name and message: -# with open(DATA_FILE, "a") as csvfile: -# writer = csv.DictWriter(csvfile, fieldnames=["name", "message", "time"]) -# writer.writerow( -# {"name": name.strip(), "message": message.strip(), "time": str(datetime.now())} -# ) -# # uncomment line below to begin saving - -# commit_url = repo.push_to_hub() -# return "" - -#iface = gr.Interface( -# store_message, -# [ -# inputs.Textbox(placeholder="Your name"), -# inputs.Textbox(placeholder="Your message", lines=2), -# ], -# "html", -# css=""" -# .message {background-color:cornflowerblue;color:white; padding:4px;margin:4px;border-radius:4px; } -# """, -# title="Reading/writing to a HuggingFace dataset repo from Spaces", -# description=f"This is a demo of how to do simple *shared data persistence* in a Gradio Space, backed by a dataset repo.", -# article=f"The dataset repo is [{DATASET_REPO_URL}]({DATASET_REPO_URL})", -#) - - -# main ------------------------- -mname = "facebook/blenderbot-400M-distill" -model = BlenderbotForConditionalGeneration.from_pretrained(mname) -tokenizer = BlenderbotTokenizer.from_pretrained(mname) - -def take_last_tokens(inputs, note_history, history): - """Filter the last 128 tokens""" - if inputs['input_ids'].shape[1] > 128: - inputs['input_ids'] = torch.tensor([inputs['input_ids'][0][-128:].tolist()]) - inputs['attention_mask'] = torch.tensor([inputs['attention_mask'][0][-128:].tolist()]) - note_history = ['</s> <s>'.join(note_history[0].split('</s> <s>')[2:])] - history = history[1:] - return inputs, note_history, history - -def add_note_to_history(note, note_history): - """Add a note to the historical information""" - note_history.append(note) - note_history = '</s> <s>'.join(note_history) - return [note_history] - - -def chat(message, history): - history = history or [] - if history: - history_useful = ['</s> <s>'.join([str(a[0])+'</s> <s>'+str(a[1]) for a in history])] - else: - history_useful = [] - history_useful = add_note_to_history(message, history_useful) - inputs = tokenizer(history_useful, return_tensors="pt") - inputs, history_useful, history = take_last_tokens(inputs, history_useful, history) - reply_ids = model.generate(**inputs) - response = tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0] - history_useful = add_note_to_history(response, history_useful) - list_history = history_useful[0].split('</s> <s>') - history.append((list_history[-2], list_history[-1])) -# store_message(message, response) # Save to dataset - uncomment if you uncomment above to save inputs and outputs to your dataset - return history, history - - -SAMPLE_RATE = 16000 -model = nemo_asr.models.EncDecRNNTBPEModel.from_pretrained("nvidia/stt_en_conformer_transducer_xlarge") -model.change_decoding_strategy(None) -model.eval() - -def 
process_audio_file(file): - data, sr = librosa.load(file) - if sr != SAMPLE_RATE: - data = librosa.resample(data, orig_sr=sr, target_sr=SAMPLE_RATE) - # monochannel - data = librosa.to_mono(data) - return data - - -def transcribe(audio, state = ""): - if state is None: - state = "" - audio_data = process_audio_file(audio) - with tempfile.TemporaryDirectory() as tmpdir: - audio_path = os.path.join(tmpdir, f'audio_{uuid.uuid4()}.wav') - soundfile.write(audio_path, audio_data, SAMPLE_RATE) - transcriptions = model.transcribe([audio_path]) - if type(transcriptions) == tuple and len(transcriptions) == 2: - transcriptions = transcriptions[0] - transcriptions = transcriptions[0] -# store_message(transcriptions, state) # Save to dataset - uncomment to store into a dataset - hint you will need your HF_TOKEN - state = state + transcriptions + " " - return state, state - -iface = gr.Interface( - fn=transcribe, - inputs=[ - gr.Audio(source="microphone", type='filepath', streaming=True), - "state", - ], - outputs=[ - "textbox", - "state", - ], - layout="horizontal", - theme="huggingface", - title="🗣️LiveSpeechRecognition🧠Memory💾", - description=f"Live Automatic Speech Recognition (ASR) with Memory💾 Dataset.", - allow_flagging='never', - live=True, -# article=f"Result Output Saved to Memory💾 Dataset: [{DATASET_REPO_URL}]({DATASET_REPO_URL})" -) -iface.launch() diff --git a/spaces/artificialguybr/VIDEO-TRANSLATION-TRANSCRIPTION/app.py b/spaces/artificialguybr/VIDEO-TRANSLATION-TRANSCRIPTION/app.py deleted file mode 100644 index c58b69e737c8cf58fca776ac58492b8ec3a30528..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/VIDEO-TRANSLATION-TRANSCRIPTION/app.py +++ /dev/null @@ -1,153 +0,0 @@ -import gradio as gr -from transformers import AutoModelForSeq2SeqLM, AutoTokenizer -from subprocess import run -from faster_whisper import WhisperModel -import json -import tempfile -import os -import ffmpeg -from zipfile import ZipFile -import stat -import uuid -import subprocess -import torch -import bitsandbytes -import scipy -from googletrans import Translator -import re -import subprocess - -ZipFile("ffmpeg.zip").extractall() -st = os.stat('ffmpeg') -os.chmod('ffmpeg', st.st_mode | stat.S_IEXEC) - -with open('google_lang_codes.json', 'r') as f: - google_lang_codes = json.load(f) - -translator = Translator() - -#tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-3.3B") -#model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-3.3B") -whisper_model = WhisperModel("large-v2", device="cuda", compute_type="float16") - -print("cwd", os.getcwd()) -print(os.listdir()) - - -def process_video(Video, target_language): - current_path = os.getcwd() - print("Iniciando process_video") - common_uuid = uuid.uuid4() - print("Checking FFmpeg availability...") - run(["ffmpeg", "-version"]) - audio_file = f"{common_uuid}.wav" - run(["ffmpeg", "-i", Video, audio_file]) - - # Transcription with Whisper. - print("Iniciando transcrição com Whisper") - segments, _ = whisper_model.transcribe(audio_file, beam_size=5) - segments = list(segments) - transcript_file = f"{current_path}/{common_uuid}.srt" - - # Create a list to hold the translated lines. 
- translated_lines = [] - - with open(transcript_file, "w+", encoding="utf-8") as f: - counter = 1 - for segment in segments: - start_hours = int(segment.start // 3600) - start_minutes = int((segment.start % 3600) // 60) - start_seconds = int(segment.start % 60) - start_milliseconds = int((segment.start - int(segment.start)) * 1000) - - end_hours = int(segment.end // 3600) - end_minutes = int((segment.end % 3600) // 60) - end_seconds = int(segment.end % 60) - end_milliseconds = int((segment.end - int(segment.end)) * 1000) - - formatted_start = f"{start_hours:02d}:{start_minutes:02d}:{start_seconds:02d},{start_milliseconds:03d}" - formatted_end = f"{end_hours:02d}:{end_minutes:02d}:{end_seconds:02d},{end_milliseconds:03d}" - - f.write(f"{counter}\n") - f.write(f"{formatted_start} --> {formatted_end}\n") - f.write(f"{segment.text}\n\n") - counter += 1 - - - # Move the file pointer to the beginning of the file. - f.seek(0) - - # Translating the SRT from Whisper with NLLB. - target_language_code = google_lang_codes.get(target_language, "en") - paragraph = "" - for line in f: - if line.strip().isnumeric() or "-->" in line: - translated_lines.append(line) - elif line.strip() != "": - translated_text = translator.translate(line.strip(), dest=target_language_code).text - translated_lines.append(translated_text + "\n") - else: - translated_lines.append("\n") - - # Move the file pointer to the beginning of the file and truncate it. - f.seek(0) - f.truncate() - - # Write the translated lines back into the original file. - f.writelines(translated_lines) - #return None, None - output_video = f"{common_uuid}_output_video.mp4" - # Debugging: Validate FFmpeg command for subtitle embedding - print("Validating FFmpeg command for subtitle embedding...") - print(f"Translated SRT file: {transcript_file}") - - with open(transcript_file, 'r', encoding='utf-8') as f: - print(f"First few lines of translated SRT: {f.readlines()[:10]}") - if os.path.exists(transcript_file): - print(f"{transcript_file} exists.") - else: - print(f"{transcript_file} does not exist.") - #transcript_file_abs_path = os.path.abspath(transcript_file) - try: - if target_language_code == 'ja': # 'ja' é o código de idioma para o japonês - result = subprocess.run(["ffmpeg", "-i", Video, "-vf", f"subtitles={transcript_file}:force_style='FontName=Noto Sans CJK JP',charenc=UTF-8", "-scodec", "mov_text", "-metadata:s:s:0", "language=jpn", output_video], capture_output=True, text=True) - else: - result = subprocess.run(["ffmpeg", "-i", Video, "-vf", f"subtitles={transcript_file}:force_style='FontName=Arial Unicode MS'", output_video], capture_output=True, text=True) - if result.returncode == 0: - print("FFmpeg executado com sucesso.") - else: - print(f"FFmpeg falhou com o código de retorno {result.returncode}.") - print("Stdout:", result.stdout) - print("Stderr:", result.stderr) - except Exception as e: - print(f"Ocorreu uma exceção: {e}") - print("process_video concluído com sucesso") - os.unlink(audio_file) - os.unlink(transcript_file) - print(f"Returning output video path: {output_video}") - return output_video - -iface = gr.Interface( - fn=process_video, - inputs=[ - gr.Video(), - gr.Dropdown(choices=list(google_lang_codes.keys()), label="Target Language for Translation", value="English"), - ], - outputs=[ - gr.Video(), - #gr.FileExplorer() - ], - live=False, - title="VIDEO TRANSCRIPTION AND TRANSLATION", - description="""This tool was developed by [@artificialguybr](https://twitter.com/artificialguybr) using entirely open-source tools. 
Special thanks to Hugging Face for the GPU support. Test the [Video Dubbing](https://huggingface.co/spaces/artificialguybr/video-dubbing) space!""", - allow_flagging=False -) -with gr.Blocks() as demo: - iface.render() - gr.Markdown(""" - **Note:** - - Video limit is 15 minute. It will do the transcription and translate of subtitles. - - The tool uses open-source models for all models. It's a alpha version. - """) -demo.queue(concurrency_count=1, max_size=15) -demo.launch() \ No newline at end of file diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Compiler/PyrexTypes.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Compiler/PyrexTypes.py deleted file mode 100644 index dcb51fe34114d0538d582a3a76e3d80c000b0139..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Compiler/PyrexTypes.py +++ /dev/null @@ -1,4735 +0,0 @@ -# -# Cython/Python language types -# - -from __future__ import absolute_import - -import copy -import hashlib -import re - -try: - reduce -except NameError: - from functools import reduce - -from Cython.Utils import cached_function -from .Code import UtilityCode, LazyUtilityCode, TempitaUtilityCode -from . import StringEncoding -from . import Naming - -from .Errors import error, warning - - -class BaseType(object): - # - # Base class for all Cython types including pseudo-types. - - # List of attribute names of any subtypes - subtypes = [] - _empty_declaration = None - _specialization_name = None - default_format_spec = None - - def can_coerce_to_pyobject(self, env): - return False - - def can_coerce_from_pyobject(self, env): - return False - - def can_coerce_to_pystring(self, env, format_spec=None): - return False - - def convert_to_pystring(self, cvalue, code, format_spec=None): - raise NotImplementedError("C types that support string formatting must override this method") - - def cast_code(self, expr_code): - return "((%s)%s)" % (self.empty_declaration_code(), expr_code) - - def empty_declaration_code(self): - if self._empty_declaration is None: - self._empty_declaration = self.declaration_code('') - return self._empty_declaration - - def specialization_name(self): - if self._specialization_name is None: - # This is not entirely robust. - common_subs = (self.empty_declaration_code() - .replace("unsigned ", "unsigned_") - .replace("long long", "long_long") - .replace(" ", "__")) - self._specialization_name = re.sub( - '[^a-zA-Z0-9_]', lambda x: '_%x_' % ord(x.group(0)), common_subs) - return self._specialization_name - - def base_declaration_code(self, base_code, entity_code): - if entity_code: - return "%s %s" % (base_code, entity_code) - else: - return base_code - - def __deepcopy__(self, memo): - """ - Types never need to be copied, if we do copy, Unfortunate Things - Will Happen! 
- """ - return self - - def get_fused_types(self, result=None, seen=None, subtypes=None): - subtypes = subtypes or self.subtypes - if not subtypes: - return None - - if result is None: - result = [] - seen = set() - - for attr in subtypes: - list_or_subtype = getattr(self, attr) - if list_or_subtype: - if isinstance(list_or_subtype, BaseType): - list_or_subtype.get_fused_types(result, seen) - else: - for subtype in list_or_subtype: - subtype.get_fused_types(result, seen) - - return result - - def specialize_fused(self, env): - if env.fused_to_specific: - return self.specialize(env.fused_to_specific) - - return self - - @property - def is_fused(self): - """ - Whether this type or any of its subtypes is a fused type - """ - # Add this indirection for the is_fused property to allow overriding - # get_fused_types in subclasses. - return self.get_fused_types() - - def deduce_template_params(self, actual): - """ - Deduce any template params in this (argument) type given the actual - argument type. - - http://en.cppreference.com/w/cpp/language/function_template#Template_argument_deduction - """ - return {} - - def __lt__(self, other): - """ - For sorting. The sorting order should correspond to the preference of - conversion from Python types. - - Override to provide something sensible. This is only implemented so that - python 3 doesn't trip - """ - return id(type(self)) < id(type(other)) - - def py_type_name(self): - """ - Return the name of the Python type that can coerce to this type. - """ - - def typeof_name(self): - """ - Return the string with which fused python functions can be indexed. - """ - if self.is_builtin_type or self.py_type_name() == 'object': - index_name = self.py_type_name() - else: - index_name = str(self) - - return index_name - - def check_for_null_code(self, cname): - """ - Return the code for a NULL-check in case an UnboundLocalError should - be raised if an entry of this type is referenced before assignment. - Returns None if no check should be performed. - """ - return None - - def invalid_value(self): - """ - Returns the most invalid value an object of this type can assume as a - C expression string. Returns None if no such value exists. - """ - - -class PyrexType(BaseType): - # - # Base class for all Cython types - # - # is_pyobject boolean Is a Python object type - # is_extension_type boolean Is a Python extension type - # is_final_type boolean Is a final extension type - # is_numeric boolean Is a C numeric type - # is_int boolean Is a C integer type - # is_float boolean Is a C floating point type - # is_complex boolean Is a C complex type - # is_void boolean Is the C void type - # is_array boolean Is a C array type - # is_ptr boolean Is a C pointer type - # is_null_ptr boolean Is the type of NULL - # is_reference boolean Is a C reference type - # is_const boolean Is a C const type. 
- # is_cfunction boolean Is a C function type - # is_struct_or_union boolean Is a C struct or union type - # is_struct boolean Is a C struct type - # is_enum boolean Is a C enum type - # is_typedef boolean Is a typedef type - # is_string boolean Is a C char * type - # is_pyunicode_ptr boolean Is a C PyUNICODE * type - # is_cpp_string boolean Is a C++ std::string type - # is_unicode_char boolean Is either Py_UCS4 or Py_UNICODE - # is_returncode boolean Is used only to signal exceptions - # is_error boolean Is the dummy error type - # is_buffer boolean Is buffer access type - # is_pythran_expr boolean Is Pythran expr - # is_numpy_buffer boolean Is Numpy array buffer - # has_attributes boolean Has C dot-selectable attributes - # default_value string Initial value that can be assigned before first user assignment. - # declaration_value string The value statically assigned on declaration (if any). - # entry Entry The Entry for this type - # - # declaration_code(entity_code, - # for_display = 0, dll_linkage = None, pyrex = 0) - # Returns a code fragment for the declaration of an entity - # of this type, given a code fragment for the entity. - # * If for_display, this is for reading by a human in an error - # message; otherwise it must be valid C code. - # * If dll_linkage is not None, it must be 'DL_EXPORT' or - # 'DL_IMPORT', and will be added to the base type part of - # the declaration. - # * If pyrex = 1, this is for use in a 'cdef extern' - # statement of a Cython include file. - # - # assignable_from(src_type) - # Tests whether a variable of this type can be - # assigned a value of type src_type. - # - # same_as(other_type) - # Tests whether this type represents the same type - # as other_type. - # - # as_argument_type(): - # Coerces array and C function types into pointer type for use as - # a formal argument type. - # - - is_pyobject = 0 - is_unspecified = 0 - is_extension_type = 0 - is_final_type = 0 - is_builtin_type = 0 - is_numeric = 0 - is_int = 0 - is_float = 0 - is_complex = 0 - is_void = 0 - is_array = 0 - is_ptr = 0 - is_null_ptr = 0 - is_reference = 0 - is_const = 0 - is_cfunction = 0 - is_struct_or_union = 0 - is_cpp_class = 0 - is_cpp_string = 0 - is_struct = 0 - is_enum = 0 - is_typedef = 0 - is_string = 0 - is_pyunicode_ptr = 0 - is_unicode_char = 0 - is_returncode = 0 - is_error = 0 - is_buffer = 0 - is_ctuple = 0 - is_memoryviewslice = 0 - is_pythran_expr = 0 - is_numpy_buffer = 0 - has_attributes = 0 - default_value = "" - declaration_value = "" - - def resolve(self): - # If a typedef, returns the base type. - return self - - def specialize(self, values): - # TODO(danilo): Override wherever it makes sense. - return self - - def literal_code(self, value): - # Returns a C code fragment representing a literal - # value of this type. 
- return str(value) - - def __str__(self): - return self.declaration_code("", for_display = 1).strip() - - def same_as(self, other_type, **kwds): - return self.same_as_resolved_type(other_type.resolve(), **kwds) - - def same_as_resolved_type(self, other_type): - return self == other_type or other_type is error_type - - def subtype_of(self, other_type): - return self.subtype_of_resolved_type(other_type.resolve()) - - def subtype_of_resolved_type(self, other_type): - return self.same_as(other_type) - - def assignable_from(self, src_type): - return self.assignable_from_resolved_type(src_type.resolve()) - - def assignable_from_resolved_type(self, src_type): - return self.same_as(src_type) - - def as_argument_type(self): - return self - - def is_complete(self): - # A type is incomplete if it is an unsized array, - # a struct whose attributes are not defined, etc. - return 1 - - def is_simple_buffer_dtype(self): - return (self.is_int or self.is_float or self.is_complex or self.is_pyobject or - self.is_extension_type or self.is_ptr) - - def struct_nesting_depth(self): - # Returns the number levels of nested structs. This is - # used for constructing a stack for walking the run-time - # type information of the struct. - return 1 - - def global_init_code(self, entry, code): - # abstract - pass - - def needs_nonecheck(self): - return 0 - - def _assign_from_py_code(self, source_code, result_code, error_pos, code, - from_py_function=None, error_condition=None, extra_args=None): - args = ', ' + ', '.join('%s' % arg for arg in extra_args) if extra_args else '' - convert_call = "%s(%s%s)" % ( - from_py_function or self.from_py_function, - source_code, - args, - ) - if self.is_enum: - convert_call = typecast(self, c_long_type, convert_call) - return '%s = %s; %s' % ( - result_code, - convert_call, - code.error_goto_if(error_condition or self.error_condition(result_code), error_pos)) - - -def public_decl(base_code, dll_linkage): - if dll_linkage: - return "%s(%s)" % (dll_linkage, base_code.replace(',', ' __PYX_COMMA ')) - else: - return base_code - -def create_typedef_type(name, base_type, cname, is_external=0, namespace=None): - is_fused = base_type.is_fused - if base_type.is_complex or is_fused: - if is_external: - if is_fused: - msg = "Fused" - else: - msg = "Complex" - - raise ValueError("%s external typedefs not supported" % msg) - - return base_type - else: - return CTypedefType(name, base_type, cname, is_external, namespace) - - -class CTypedefType(BaseType): - # - # Pseudo-type defined with a ctypedef statement in a - # 'cdef extern from' block. - # Delegates most attribute lookups to the base type. - # (Anything not defined here or in the BaseType is delegated.) 
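As the header comment above notes, a CTypedefType forwards anything it does not define to its base type; a minimal standalone sketch of that __getattr__ delegation pattern (class and attribute names here are illustrative, not Cython's):

class BaseTypeLike:
    is_int = True
    def sign_and_name(self):
        return "long"

class TypedefLike:
    def __init__(self, name, base):
        self.typedef_name = name
        self.typedef_base_type = base
    def __getattr__(self, name):
        # Anything not defined on the typedef is looked up on the base type.
        return getattr(self.typedef_base_type, name)

t = TypedefLike("my_long_t", BaseTypeLike())
assert t.is_int and t.sign_and_name() == "long"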
- # - # qualified_name string - # typedef_name string - # typedef_cname string - # typedef_base_type PyrexType - # typedef_is_external bool - - is_typedef = 1 - typedef_is_external = 0 - - to_py_utility_code = None - from_py_utility_code = None - - subtypes = ['typedef_base_type'] - - def __init__(self, name, base_type, cname, is_external=0, namespace=None): - assert not base_type.is_complex - self.typedef_name = name - self.typedef_cname = cname - self.typedef_base_type = base_type - self.typedef_is_external = is_external - self.typedef_namespace = namespace - - def invalid_value(self): - return self.typedef_base_type.invalid_value() - - def resolve(self): - return self.typedef_base_type.resolve() - - def declaration_code(self, entity_code, - for_display = 0, dll_linkage = None, pyrex = 0): - if pyrex or for_display: - base_code = self.typedef_name - else: - base_code = public_decl(self.typedef_cname, dll_linkage) - if self.typedef_namespace is not None and not pyrex: - base_code = "%s::%s" % (self.typedef_namespace.empty_declaration_code(), base_code) - return self.base_declaration_code(base_code, entity_code) - - def as_argument_type(self): - return self - - def cast_code(self, expr_code): - # If self is really an array (rather than pointer), we can't cast. - # For example, the gmp mpz_t. - if self.typedef_base_type.is_array: - base_type = self.typedef_base_type.base_type - return CPtrType(base_type).cast_code(expr_code) - else: - return BaseType.cast_code(self, expr_code) - - def specialize(self, values): - base_type = self.typedef_base_type.specialize(values) - namespace = self.typedef_namespace.specialize(values) if self.typedef_namespace else None - if base_type is self.typedef_base_type and namespace is self.typedef_namespace: - return self - else: - return create_typedef_type(self.typedef_name, base_type, self.typedef_cname, - 0, namespace) - - def __repr__(self): - return "<CTypedefType %s>" % self.typedef_cname - - def __str__(self): - return self.typedef_name - - def _create_utility_code(self, template_utility_code, - template_function_name): - type_name = type_identifier(self.typedef_cname) - utility_code = template_utility_code.specialize( - type = self.typedef_cname, - TypeName = type_name) - function_name = template_function_name % type_name - return utility_code, function_name - - def create_to_py_utility_code(self, env): - if self.typedef_is_external: - if not self.to_py_utility_code: - base_type = self.typedef_base_type - if type(base_type) is CIntType: - self.to_py_function = "__Pyx_PyInt_From_" + self.specialization_name() - env.use_utility_code(TempitaUtilityCode.load_cached( - "CIntToPy", "TypeConversion.c", - context={"TYPE": self.empty_declaration_code(), - "TO_PY_FUNCTION": self.to_py_function})) - return True - elif base_type.is_float: - pass # XXX implement! - elif base_type.is_complex: - pass # XXX implement! 
- pass - elif base_type.is_cpp_string: - cname = "__pyx_convert_PyObject_string_to_py_%s" % type_identifier(self) - context = { - 'cname': cname, - 'type': self.typedef_cname, - } - from .UtilityCode import CythonUtilityCode - env.use_utility_code(CythonUtilityCode.load( - "string.to_py", "CppConvert.pyx", context=context)) - self.to_py_function = cname - return True - if self.to_py_utility_code: - env.use_utility_code(self.to_py_utility_code) - return True - # delegation - return self.typedef_base_type.create_to_py_utility_code(env) - - def create_from_py_utility_code(self, env): - if self.typedef_is_external: - if not self.from_py_utility_code: - base_type = self.typedef_base_type - if type(base_type) is CIntType: - self.from_py_function = "__Pyx_PyInt_As_" + self.specialization_name() - env.use_utility_code(TempitaUtilityCode.load_cached( - "CIntFromPy", "TypeConversion.c", - context={"TYPE": self.empty_declaration_code(), - "FROM_PY_FUNCTION": self.from_py_function})) - return True - elif base_type.is_float: - pass # XXX implement! - elif base_type.is_complex: - pass # XXX implement! - elif base_type.is_cpp_string: - cname = '__pyx_convert_string_from_py_%s' % type_identifier(self) - context = { - 'cname': cname, - 'type': self.typedef_cname, - } - from .UtilityCode import CythonUtilityCode - env.use_utility_code(CythonUtilityCode.load( - "string.from_py", "CppConvert.pyx", context=context)) - self.from_py_function = cname - return True - if self.from_py_utility_code: - env.use_utility_code(self.from_py_utility_code) - return True - # delegation - return self.typedef_base_type.create_from_py_utility_code(env) - - def to_py_call_code(self, source_code, result_code, result_type, to_py_function=None): - if to_py_function is None: - to_py_function = self.to_py_function - return self.typedef_base_type.to_py_call_code( - source_code, result_code, result_type, to_py_function) - - def from_py_call_code(self, source_code, result_code, error_pos, code, - from_py_function=None, error_condition=None): - return self.typedef_base_type.from_py_call_code( - source_code, result_code, error_pos, code, - from_py_function or self.from_py_function, - error_condition or self.error_condition(result_code) - ) - - def overflow_check_binop(self, binop, env, const_rhs=False): - env.use_utility_code(UtilityCode.load("Common", "Overflow.c")) - type = self.empty_declaration_code() - name = self.specialization_name() - if binop == "lshift": - env.use_utility_code(TempitaUtilityCode.load_cached( - "LeftShift", "Overflow.c", - context={'TYPE': type, 'NAME': name, 'SIGNED': self.signed})) - else: - if const_rhs: - binop += "_const" - _load_overflow_base(env) - env.use_utility_code(TempitaUtilityCode.load_cached( - "SizeCheck", "Overflow.c", - context={'TYPE': type, 'NAME': name})) - env.use_utility_code(TempitaUtilityCode.load_cached( - "Binop", "Overflow.c", - context={'TYPE': type, 'NAME': name, 'BINOP': binop})) - return "__Pyx_%s_%s_checking_overflow" % (binop, name) - - def error_condition(self, result_code): - if self.typedef_is_external: - if self.exception_value: - condition = "(%s == %s)" % ( - result_code, self.cast_code(self.exception_value)) - if self.exception_check: - condition += " && PyErr_Occurred()" - return condition - # delegation - return self.typedef_base_type.error_condition(result_code) - - def __getattr__(self, name): - return getattr(self.typedef_base_type, name) - - def py_type_name(self): - return self.typedef_base_type.py_type_name() - - def can_coerce_to_pyobject(self, env): - return 
self.typedef_base_type.can_coerce_to_pyobject(env) - - def can_coerce_from_pyobject(self, env): - return self.typedef_base_type.can_coerce_from_pyobject(env) - - -class MemoryViewSliceType(PyrexType): - - is_memoryviewslice = 1 - - has_attributes = 1 - scope = None - - # These are special cased in Defnode - from_py_function = None - to_py_function = None - - exception_value = None - exception_check = True - - subtypes = ['dtype'] - - def __init__(self, base_dtype, axes): - """ - MemoryViewSliceType(base, axes) - - Base is the C base type; axes is a list of (access, packing) strings, - where access is one of 'full', 'direct' or 'ptr' and packing is one of - 'contig', 'strided' or 'follow'. There is one (access, packing) tuple - for each dimension. - - the access specifiers determine whether the array data contains - pointers that need to be dereferenced along that axis when - retrieving/setting: - - 'direct' -- No pointers stored in this dimension. - 'ptr' -- Pointer stored in this dimension. - 'full' -- Check along this dimension, don't assume either. - - the packing specifiers specify how the array elements are layed-out - in memory. - - 'contig' -- The data is contiguous in memory along this dimension. - At most one dimension may be specified as 'contig'. - 'strided' -- The data isn't contiguous along this dimension. - 'follow' -- Used for C/Fortran contiguous arrays, a 'follow' dimension - has its stride automatically computed from extents of the other - dimensions to ensure C or Fortran memory layout. - - C-contiguous memory has 'direct' as the access spec, 'contig' as the - *last* axis' packing spec and 'follow' for all other packing specs. - - Fortran-contiguous memory has 'direct' as the access spec, 'contig' as - the *first* axis' packing spec and 'follow' for all other packing - specs. - """ - from . import Buffer, MemoryView - - self.dtype = base_dtype - self.axes = axes - self.ndim = len(axes) - self.flags = MemoryView.get_buf_flags(self.axes) - - self.is_c_contig, self.is_f_contig = MemoryView.is_cf_contig(self.axes) - assert not (self.is_c_contig and self.is_f_contig) - - self.mode = MemoryView.get_mode(axes) - self.writable_needed = False - - if not self.dtype.is_fused: - self.dtype_name = Buffer.mangle_dtype_name(self.dtype) - - def __hash__(self): - return hash(self.__class__) ^ hash(self.dtype) ^ hash(tuple(self.axes)) - - def __eq__(self, other): - if isinstance(other, BaseType): - return self.same_as_resolved_type(other) - else: - return False - - def same_as_resolved_type(self, other_type): - return ((other_type.is_memoryviewslice and - #self.writable_needed == other_type.writable_needed and # FIXME: should be only uni-directional - self.dtype.same_as(other_type.dtype) and - self.axes == other_type.axes) or - other_type is error_type) - - def needs_nonecheck(self): - return True - - def is_complete(self): - # incomplete since the underlying struct doesn't have a cython.memoryview object. - return 0 - - def declaration_code(self, entity_code, - for_display = 0, dll_linkage = None, pyrex = 0): - # XXX: we put these guards in for now... - assert not pyrex - assert not dll_linkage - from . import MemoryView - base_code = str(self) if for_display else MemoryView.memviewslice_cname - return self.base_declaration_code( - base_code, - entity_code) - - def attributes_known(self): - if self.scope is None: - from . 
import Symtab - - self.scope = scope = Symtab.CClassScope( - 'mvs_class_'+self.specialization_suffix(), - None, - visibility='extern') - - scope.parent_type = self - scope.directives = {} - - scope.declare_var('_data', c_char_ptr_type, None, - cname='data', is_cdef=1) - - return True - - def declare_attribute(self, attribute, env, pos): - from . import MemoryView, Options - - scope = self.scope - - if attribute == 'shape': - scope.declare_var('shape', - c_array_type(c_py_ssize_t_type, - Options.buffer_max_dims), - pos, - cname='shape', - is_cdef=1) - - elif attribute == 'strides': - scope.declare_var('strides', - c_array_type(c_py_ssize_t_type, - Options.buffer_max_dims), - pos, - cname='strides', - is_cdef=1) - - elif attribute == 'suboffsets': - scope.declare_var('suboffsets', - c_array_type(c_py_ssize_t_type, - Options.buffer_max_dims), - pos, - cname='suboffsets', - is_cdef=1) - - elif attribute in ("copy", "copy_fortran"): - ndim = len(self.axes) - - follow_dim = [('direct', 'follow')] - contig_dim = [('direct', 'contig')] - to_axes_c = follow_dim * (ndim - 1) + contig_dim - to_axes_f = contig_dim + follow_dim * (ndim -1) - - dtype = self.dtype - if dtype.is_const: - dtype = dtype.const_base_type - - to_memview_c = MemoryViewSliceType(dtype, to_axes_c) - to_memview_f = MemoryViewSliceType(dtype, to_axes_f) - - for to_memview, cython_name in [(to_memview_c, "copy"), - (to_memview_f, "copy_fortran")]: - copy_func_type = CFuncType( - to_memview, - [CFuncTypeArg("memviewslice", self, None)]) - copy_cname = MemoryView.copy_c_or_fortran_cname(to_memview) - - entry = scope.declare_cfunction( - cython_name, - copy_func_type, pos=pos, defining=1, - cname=copy_cname) - - utility = MemoryView.get_copy_new_utility(pos, self, to_memview) - env.use_utility_code(utility) - - MemoryView.use_cython_array_utility_code(env) - - elif attribute in ("is_c_contig", "is_f_contig"): - # is_c_contig and is_f_contig functions - for (c_or_f, cython_name) in (('C', 'is_c_contig'), ('F', 'is_f_contig')): - - is_contig_name = MemoryView.get_is_contig_func_name(c_or_f, self.ndim) - - cfunctype = CFuncType( - return_type=c_bint_type, - args=[CFuncTypeArg("memviewslice", self, None)], - exception_value="-1", - ) - - entry = scope.declare_cfunction(cython_name, - cfunctype, - pos=pos, - defining=1, - cname=is_contig_name) - - entry.utility_code_definition = MemoryView.get_is_contig_utility(c_or_f, self.ndim) - - return True - - def get_entry(self, node, cname=None, type=None): - from . import MemoryView, Symtab - - if cname is None: - assert node.is_simple() or node.is_temp or node.is_elemental - cname = node.result() - - if type is None: - type = node.type - - entry = Symtab.Entry(cname, cname, type, node.pos) - return MemoryView.MemoryViewSliceBufferEntry(entry) - - def conforms_to(self, dst, broadcast=False, copying=False): - """ - Returns True if src conforms to dst, False otherwise. - - If conformable, the types are the same, the ndims are equal, and each axis spec is conformable. - - Any packing/access spec is conformable to itself. - - 'direct' and 'ptr' are conformable to 'full'. - 'contig' and 'follow' are conformable to 'strided'. - Any other combo is not conformable. - """ - from . import MemoryView - - src = self - - #if not copying and self.writable_needed and not dst.writable_needed: - # return False - - src_dtype, dst_dtype = src.dtype, dst.dtype - if dst_dtype.is_const: - # Requesting read-only views is always ok => consider only the non-const base type. 
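The copy/copy_fortran targets above assemble their axis specs from the convention documented in MemoryViewSliceType.__init__; a compact standalone restatement (the function name is illustrative):

# C-contiguous slices are ('direct', 'follow') on all axes except a trailing
# ('direct', 'contig'); Fortran-contiguous slices put the 'contig' axis first.
def contiguous_axes(ndim, order="C"):
    follow = ("direct", "follow")
    contig = ("direct", "contig")
    if order == "C":
        return [follow] * (ndim - 1) + [contig]
    return [contig] + [follow] * (ndim - 1)

assert contiguous_axes(2, "C") == [("direct", "follow"), ("direct", "contig")]
assert contiguous_axes(2, "F") == [("direct", "contig"), ("direct", "follow")]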
- dst_dtype = dst_dtype.const_base_type - if src_dtype.is_const: - # When assigning between read-only views, compare only the non-const base types. - src_dtype = src_dtype.const_base_type - elif copying and src_dtype.is_const: - # Copying by value => ignore const on source. - src_dtype = src_dtype.const_base_type - - if src_dtype != dst_dtype: - return False - - if src.ndim != dst.ndim: - if broadcast: - src, dst = MemoryView.broadcast_types(src, dst) - else: - return False - - for src_spec, dst_spec in zip(src.axes, dst.axes): - src_access, src_packing = src_spec - dst_access, dst_packing = dst_spec - if src_access != dst_access and dst_access != 'full': - return False - if src_packing != dst_packing and dst_packing != 'strided' and not copying: - return False - - return True - - def valid_dtype(self, dtype, i=0): - """ - Return whether type dtype can be used as the base type of a - memoryview slice. - - We support structs, numeric types and objects - """ - if dtype.is_complex and dtype.real_type.is_int: - return False - - if dtype.is_struct and dtype.kind == 'struct': - for member in dtype.scope.var_entries: - if not self.valid_dtype(member.type): - return False - - return True - - return ( - dtype.is_error or - # Pointers are not valid (yet) - # (dtype.is_ptr and valid_memslice_dtype(dtype.base_type)) or - (dtype.is_array and i < 8 and self.valid_dtype(dtype.base_type, i + 1)) or - dtype.is_numeric or - dtype.is_pyobject or - dtype.is_fused or # accept this as it will be replaced by specializations later - (dtype.is_typedef and self.valid_dtype(dtype.typedef_base_type)) - ) - - def validate_memslice_dtype(self, pos): - if not self.valid_dtype(self.dtype): - error(pos, "Invalid base type for memoryview slice: %s" % self.dtype) - - def assert_direct_dims(self, pos): - for access, packing in self.axes: - if access != 'direct': - error(pos, "All dimensions must be direct") - return False - return True - - def transpose(self, pos): - if not self.assert_direct_dims(pos): - return error_type - return MemoryViewSliceType(self.dtype, self.axes[::-1]) - - def specialization_name(self): - return '%s_%s' % ( - super(MemoryViewSliceType,self).specialization_name(), - self.specialization_suffix()) - - def specialization_suffix(self): - return "%s_%s" % (self.axes_to_name(), self.dtype_name) - - def can_coerce_to_pyobject(self, env): - return True - - def can_coerce_from_pyobject(self, env): - return True - - def check_for_null_code(self, cname): - return cname + '.memview' - - def create_from_py_utility_code(self, env): - from . import MemoryView, Buffer - - # We don't have 'code', so use a LazyUtilityCode with a callback. 
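The per-axis rule enforced in conforms_to() above, restated as a standalone sketch (the real method additionally relaxes the packing check when copying):

# A spec conforms to itself; 'direct'/'ptr' conform to a 'full' access spec,
# and 'contig'/'follow' conform to a 'strided' packing spec.
def axis_conforms(src_spec, dst_spec):
    src_access, src_packing = src_spec
    dst_access, dst_packing = dst_spec
    if src_access != dst_access and dst_access != 'full':
        return False
    if src_packing != dst_packing and dst_packing != 'strided':
        return False
    return True

assert axis_conforms(('direct', 'contig'), ('full', 'strided'))
assert not axis_conforms(('direct', 'contig'), ('ptr', 'contig'))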
- def lazy_utility_callback(code): - context['dtype_typeinfo'] = Buffer.get_type_information_cname(code, self.dtype) - return TempitaUtilityCode.load( - "ObjectToMemviewSlice", "MemoryView_C.c", context=context) - - env.use_utility_code(MemoryView.memviewslice_init_code) - env.use_utility_code(LazyUtilityCode(lazy_utility_callback)) - - if self.is_c_contig: - c_or_f_flag = "__Pyx_IS_C_CONTIG" - elif self.is_f_contig: - c_or_f_flag = "__Pyx_IS_F_CONTIG" - else: - c_or_f_flag = "0" - - suffix = self.specialization_suffix() - funcname = "__Pyx_PyObject_to_MemoryviewSlice_" + suffix - - context = dict( - MemoryView.context, - buf_flag = self.flags, - ndim = self.ndim, - axes_specs = ', '.join(self.axes_to_code()), - dtype_typedecl = self.dtype.empty_declaration_code(), - struct_nesting_depth = self.dtype.struct_nesting_depth(), - c_or_f_flag = c_or_f_flag, - funcname = funcname, - ) - - self.from_py_function = funcname - return True - - def from_py_call_code(self, source_code, result_code, error_pos, code, - from_py_function=None, error_condition=None): - # NOTE: auto-detection of readonly buffers is disabled: - # writable = self.writable_needed or not self.dtype.is_const - writable = not self.dtype.is_const - return self._assign_from_py_code( - source_code, result_code, error_pos, code, from_py_function, error_condition, - extra_args=['PyBUF_WRITABLE' if writable else '0']) - - def create_to_py_utility_code(self, env): - self._dtype_to_py_func, self._dtype_from_py_func = self.dtype_object_conversion_funcs(env) - return True - - def to_py_call_code(self, source_code, result_code, result_type, to_py_function=None): - assert self._dtype_to_py_func - assert self._dtype_from_py_func - - to_py_func = "(PyObject *(*)(char *)) " + self._dtype_to_py_func - from_py_func = "(int (*)(char *, PyObject *)) " + self._dtype_from_py_func - - tup = (result_code, source_code, self.ndim, to_py_func, from_py_func, self.dtype.is_pyobject) - return "%s = __pyx_memoryview_fromslice(%s, %s, %s, %s, %d);" % tup - - def dtype_object_conversion_funcs(self, env): - get_function = "__pyx_memview_get_%s" % self.dtype_name - set_function = "__pyx_memview_set_%s" % self.dtype_name - - context = dict( - get_function = get_function, - set_function = set_function, - ) - - if self.dtype.is_pyobject: - utility_name = "MemviewObjectToObject" - else: - self.dtype.create_to_py_utility_code(env) - to_py_function = self.dtype.to_py_function - - from_py_function = None - if not self.dtype.is_const: - self.dtype.create_from_py_utility_code(env) - from_py_function = self.dtype.from_py_function - - if not (to_py_function or from_py_function): - return "NULL", "NULL" - if not to_py_function: - get_function = "NULL" - if not from_py_function: - set_function = "NULL" - - utility_name = "MemviewDtypeToObject" - error_condition = (self.dtype.error_condition('value') or - 'PyErr_Occurred()') - context.update( - to_py_function=to_py_function, - from_py_function=from_py_function, - dtype=self.dtype.empty_declaration_code(), - error_condition=error_condition, - ) - - utility = TempitaUtilityCode.load_cached( - utility_name, "MemoryView_C.c", context=context) - env.use_utility_code(utility) - return get_function, set_function - - def axes_to_code(self): - """Return a list of code constants for each axis""" - from . import MemoryView - d = MemoryView._spec_to_const - return ["(%s | %s)" % (d[a], d[p]) for a, p in self.axes] - - def axes_to_name(self): - """Return an abbreviated name for our axes""" - from . 
import MemoryView - d = MemoryView._spec_to_abbrev - return "".join(["%s%s" % (d[a], d[p]) for a, p in self.axes]) - - def error_condition(self, result_code): - return "!%s.memview" % result_code - - def __str__(self): - from . import MemoryView - - axes_code_list = [] - for idx, (access, packing) in enumerate(self.axes): - flag = MemoryView.get_memoryview_flag(access, packing) - if flag == "strided": - axes_code_list.append(":") - else: - if flag == 'contiguous': - have_follow = [p for a, p in self.axes[idx - 1:idx + 2] - if p == 'follow'] - if have_follow or self.ndim == 1: - flag = '1' - - axes_code_list.append("::" + flag) - - if self.dtype.is_pyobject: - dtype_name = self.dtype.name - else: - dtype_name = self.dtype - - return "%s[%s]" % (dtype_name, ", ".join(axes_code_list)) - - def specialize(self, values): - """This does not validate the base type!!""" - dtype = self.dtype.specialize(values) - if dtype is not self.dtype: - return MemoryViewSliceType(dtype, self.axes) - - return self - - def cast_code(self, expr_code): - return expr_code - - -class BufferType(BaseType): - # - # Delegates most attribute lookups to the base type. - # (Anything not defined here or in the BaseType is delegated.) - # - # dtype PyrexType - # ndim int - # mode str - # negative_indices bool - # cast bool - # is_buffer bool - # writable bool - - is_buffer = 1 - writable = True - - subtypes = ['dtype'] - - def __init__(self, base, dtype, ndim, mode, negative_indices, cast): - self.base = base - self.dtype = dtype - self.ndim = ndim - self.buffer_ptr_type = CPtrType(dtype) - self.mode = mode - self.negative_indices = negative_indices - self.cast = cast - self.is_numpy_buffer = self.base.name == "ndarray" - - def can_coerce_to_pyobject(self,env): - return True - - def can_coerce_from_pyobject(self,env): - return True - - def as_argument_type(self): - return self - - def specialize(self, values): - dtype = self.dtype.specialize(values) - if dtype is not self.dtype: - return BufferType(self.base, dtype, self.ndim, self.mode, - self.negative_indices, self.cast) - return self - - def get_entry(self, node): - from . import Buffer - assert node.is_name - return Buffer.BufferEntry(node.entry) - - def __getattr__(self, name): - return getattr(self.base, name) - - def __repr__(self): - return "<BufferType %r>" % self.base - - def __str__(self): - # avoid ', ', as fused functions split the signature string on ', ' - cast_str = '' - if self.cast: - cast_str = ',cast=True' - - return "%s[%s,ndim=%d%s]" % (self.base, self.dtype, self.ndim, - cast_str) - - def assignable_from(self, other_type): - if other_type.is_buffer: - return (self.same_as(other_type, compare_base=False) and - self.base.assignable_from(other_type.base)) - - return self.base.assignable_from(other_type) - - def same_as(self, other_type, compare_base=True): - if not other_type.is_buffer: - return other_type.same_as(self.base) - - return (self.dtype.same_as(other_type.dtype) and - self.ndim == other_type.ndim and - self.mode == other_type.mode and - self.cast == other_type.cast and - (not compare_base or self.base.same_as(other_type.base))) - - -class PyObjectType(PyrexType): - # - # Base class for all Python object types (reference-counted). 
- # - # buffer_defaults dict or None Default options for bu - - name = "object" - is_pyobject = 1 - default_value = "0" - declaration_value = "0" - buffer_defaults = None - is_extern = False - is_subclassed = False - is_gc_simple = False - - def __str__(self): - return "Python object" - - def __repr__(self): - return "<PyObjectType>" - - def can_coerce_to_pyobject(self, env): - return True - - def can_coerce_from_pyobject(self, env): - return True - - def default_coerced_ctype(self): - """The default C type that this Python type coerces to, or None.""" - return None - - def assignable_from(self, src_type): - # except for pointers, conversion will be attempted - return not src_type.is_ptr or src_type.is_string or src_type.is_pyunicode_ptr - - def declaration_code(self, entity_code, - for_display = 0, dll_linkage = None, pyrex = 0): - if pyrex or for_display: - base_code = "object" - else: - base_code = public_decl("PyObject", dll_linkage) - entity_code = "*%s" % entity_code - return self.base_declaration_code(base_code, entity_code) - - def as_pyobject(self, cname): - if (not self.is_complete()) or self.is_extension_type: - return "(PyObject *)" + cname - else: - return cname - - def py_type_name(self): - return "object" - - def __lt__(self, other): - """ - Make sure we sort highest, as instance checking on py_type_name - ('object') is always true - """ - return False - - def global_init_code(self, entry, code): - code.put_init_var_to_py_none(entry, nanny=False) - - def check_for_null_code(self, cname): - return cname - - -builtin_types_that_cannot_create_refcycles = set([ - 'bool', 'int', 'long', 'float', 'complex', - 'bytearray', 'bytes', 'unicode', 'str', 'basestring' -]) - - -class BuiltinObjectType(PyObjectType): - # objstruct_cname string Name of PyObject struct - - is_builtin_type = 1 - has_attributes = 1 - base_type = None - module_name = '__builtin__' - require_exact = 1 - - # fields that let it look like an extension type - vtabslot_cname = None - vtabstruct_cname = None - vtabptr_cname = None - typedef_flag = True - is_external = True - decl_type = 'PyObject' - - def __init__(self, name, cname, objstruct_cname=None): - self.name = name - self.cname = cname - self.typeptr_cname = "(&%s)" % cname - self.objstruct_cname = objstruct_cname - self.is_gc_simple = name in builtin_types_that_cannot_create_refcycles - if name == 'type': - # Special case the type type, as many C API calls (and other - # libraries) actually expect a PyTypeObject* for type arguments. - self.decl_type = objstruct_cname - if name == 'Exception': - self.require_exact = 0 - - def set_scope(self, scope): - self.scope = scope - if scope: - scope.parent_type = self - - def __str__(self): - return "%s object" % self.name - - def __repr__(self): - return "<%s>"% self.cname - - def default_coerced_ctype(self): - if self.name in ('bytes', 'bytearray'): - return c_char_ptr_type - elif self.name == 'bool': - return c_bint_type - elif self.name == 'float': - return c_double_type - return None - - def assignable_from(self, src_type): - if isinstance(src_type, BuiltinObjectType): - if self.name == 'basestring': - return src_type.name in ('str', 'unicode', 'basestring') - else: - return src_type.name == self.name - elif src_type.is_extension_type: - # FIXME: This is an ugly special case that we currently - # keep supporting. It allows users to specify builtin - # types as external extension types, while keeping them - # compatible with the real builtin types. We already - # generate a warning for it. Big TODO: remove! 
- return (src_type.module_name == '__builtin__' and - src_type.name == self.name) - else: - return True - - def typeobj_is_available(self): - return True - - def attributes_known(self): - return True - - def subtype_of(self, type): - return type.is_pyobject and type.assignable_from(self) - - def type_check_function(self, exact=True): - type_name = self.name - if type_name == 'str': - type_check = 'PyString_Check' - elif type_name == 'basestring': - type_check = '__Pyx_PyBaseString_Check' - elif type_name == 'Exception': - type_check = '__Pyx_PyException_Check' - elif type_name == 'bytearray': - type_check = 'PyByteArray_Check' - elif type_name == 'frozenset': - type_check = 'PyFrozenSet_Check' - else: - type_check = 'Py%s_Check' % type_name.capitalize() - if exact and type_name not in ('bool', 'slice', 'Exception'): - type_check += 'Exact' - return type_check - - def isinstance_code(self, arg): - return '%s(%s)' % (self.type_check_function(exact=False), arg) - - def type_test_code(self, arg, notnone=False, exact=True): - type_check = self.type_check_function(exact=exact) - check = 'likely(%s(%s))' % (type_check, arg) - if not notnone: - check += '||((%s) == Py_None)' % arg - if self.name == 'basestring': - name = '(PY_MAJOR_VERSION < 3 ? "basestring" : "str")' - space_for_name = 16 - else: - name = '"%s"' % self.name - # avoid wasting too much space but limit number of different format strings - space_for_name = (len(self.name) // 16 + 1) * 16 - error = '(PyErr_Format(PyExc_TypeError, "Expected %%.%ds, got %%.200s", %s, Py_TYPE(%s)->tp_name), 0)' % ( - space_for_name, name, arg) - return check + '||' + error - - def declaration_code(self, entity_code, - for_display = 0, dll_linkage = None, pyrex = 0): - if pyrex or for_display: - base_code = self.name - else: - base_code = public_decl(self.decl_type, dll_linkage) - entity_code = "*%s" % entity_code - return self.base_declaration_code(base_code, entity_code) - - def as_pyobject(self, cname): - if self.decl_type == 'PyObject': - return cname - else: - return "(PyObject *)" + cname - - def cast_code(self, expr_code, to_object_struct = False): - return "((%s*)%s)" % ( - to_object_struct and self.objstruct_cname or self.decl_type, # self.objstruct_cname may be None - expr_code) - - def py_type_name(self): - return self.name - - - -class PyExtensionType(PyObjectType): - # - # A Python extension type. - # - # name string - # scope CClassScope Attribute namespace - # visibility string - # typedef_flag boolean - # base_type PyExtensionType or None - # module_name string or None Qualified name of defining module - # objstruct_cname string Name of PyObject struct - # objtypedef_cname string Name of PyObject struct typedef - # typeobj_cname string or None C code fragment referring to type object - # typeptr_cname string or None Name of pointer to external type object - # vtabslot_cname string Name of C method table member - # vtabstruct_cname string Name of C method table struct - # vtabptr_cname string Name of pointer to C method table - # vtable_cname string Name of C method table definition - # early_init boolean Whether to initialize early (as opposed to during module execution). 
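The C API check-function naming used by type_check_function() above, restated as a standalone sketch (the function name is illustrative):

# Most builtins map to Py<Name>_Check, a few have special spellings, and
# exact-type checks append 'Exact'.
def type_check_name(type_name, exact=True):
    special = {
        'str': 'PyString_Check',
        'basestring': '__Pyx_PyBaseString_Check',
        'Exception': '__Pyx_PyException_Check',
        'bytearray': 'PyByteArray_Check',
        'frozenset': 'PyFrozenSet_Check',
    }
    check = special.get(type_name, 'Py%s_Check' % type_name.capitalize())
    if exact and type_name not in ('bool', 'slice', 'Exception'):
        check += 'Exact'
    return check

assert type_check_name('dict') == 'PyDict_CheckExact'
assert type_check_name('list', exact=False) == 'PyList_Check'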
- # defered_declarations [thunk] Used to declare class hierarchies in order - # check_size 'warn', 'error', 'ignore' What to do if tp_basicsize does not match - - is_extension_type = 1 - has_attributes = 1 - early_init = 1 - - objtypedef_cname = None - - def __init__(self, name, typedef_flag, base_type, is_external=0, check_size=None): - self.name = name - self.scope = None - self.typedef_flag = typedef_flag - if base_type is not None: - base_type.is_subclassed = True - self.base_type = base_type - self.module_name = None - self.objstruct_cname = None - self.typeobj_cname = None - self.typeptr_cname = None - self.vtabslot_cname = None - self.vtabstruct_cname = None - self.vtabptr_cname = None - self.vtable_cname = None - self.is_external = is_external - self.check_size = check_size or 'warn' - self.defered_declarations = [] - - def set_scope(self, scope): - self.scope = scope - if scope: - scope.parent_type = self - - def needs_nonecheck(self): - return True - - def subtype_of_resolved_type(self, other_type): - if other_type.is_extension_type or other_type.is_builtin_type: - return self is other_type or ( - self.base_type and self.base_type.subtype_of(other_type)) - else: - return other_type is py_object_type - - def typeobj_is_available(self): - # Do we have a pointer to the type object? - return self.typeptr_cname - - def typeobj_is_imported(self): - # If we don't know the C name of the type object but we do - # know which module it's defined in, it will be imported. - return self.typeobj_cname is None and self.module_name is not None - - def assignable_from(self, src_type): - if self == src_type: - return True - if isinstance(src_type, PyExtensionType): - if src_type.base_type is not None: - return self.assignable_from(src_type.base_type) - if isinstance(src_type, BuiltinObjectType): - # FIXME: This is an ugly special case that we currently - # keep supporting. It allows users to specify builtin - # types as external extension types, while keeping them - # compatible with the real builtin types. We already - # generate a warning for it. Big TODO: remove! - return (self.module_name == '__builtin__' and - self.name == src_type.name) - return False - - def declaration_code(self, entity_code, - for_display = 0, dll_linkage = None, pyrex = 0, deref = 0): - if pyrex or for_display: - base_code = self.name - else: - if self.typedef_flag: - objstruct = self.objstruct_cname - else: - objstruct = "struct %s" % self.objstruct_cname - base_code = public_decl(objstruct, dll_linkage) - if deref: - assert not entity_code - else: - entity_code = "*%s" % entity_code - return self.base_declaration_code(base_code, entity_code) - - def type_test_code(self, py_arg, notnone=False): - - none_check = "((%s) == Py_None)" % py_arg - type_check = "likely(__Pyx_TypeTest(%s, %s))" % ( - py_arg, self.typeptr_cname) - if notnone: - return type_check - else: - return "likely(%s || %s)" % (none_check, type_check) - - def attributes_known(self): - return self.scope is not None - - def __str__(self): - return self.name - - def __repr__(self): - return "<PyExtensionType %s%s>" % (self.scope.class_name, - ("", " typedef")[self.typedef_flag]) - - def py_type_name(self): - if not self.module_name: - return self.name - - return "__import__(%r, None, None, ['']).%s" % (self.module_name, - self.name) - -class CType(PyrexType): - # - # Base class for all C types (non-reference-counted). 
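PyExtensionType.assignable_from above walks the source type's base-type chain, so a subclass is assignable to any of its ancestors; a minimal standalone sketch of that rule (class names are illustrative):

class ExtType:
    def __init__(self, name, base=None):
        self.name = name
        self.base_type = base
    def assignable_from(self, src):
        if src is self:
            return True
        if src.base_type is not None:
            # Walk up the inheritance chain of the source type.
            return self.assignable_from(src.base_type)
        return False

animal = ExtType("Animal")
dog = ExtType("Dog", base=animal)
assert animal.assignable_from(dog)       # subclass -> base is allowed
assert not dog.assignable_from(animal)   # base -> subclass is not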
- # - # to_py_function string C function for converting to Python object - # from_py_function string C function for constructing from Python object - # - - to_py_function = None - from_py_function = None - exception_value = None - exception_check = 1 - - def create_to_py_utility_code(self, env): - return self.to_py_function is not None - - def create_from_py_utility_code(self, env): - return self.from_py_function is not None - - def can_coerce_to_pyobject(self, env): - return self.create_to_py_utility_code(env) - - def can_coerce_from_pyobject(self, env): - return self.create_from_py_utility_code(env) - - def error_condition(self, result_code): - conds = [] - if self.is_string or self.is_pyunicode_ptr: - conds.append("(!%s)" % result_code) - elif self.exception_value is not None: - conds.append("(%s == (%s)%s)" % (result_code, self.sign_and_name(), self.exception_value)) - if self.exception_check: - conds.append("PyErr_Occurred()") - if len(conds) > 0: - return " && ".join(conds) - else: - return 0 - - def to_py_call_code(self, source_code, result_code, result_type, to_py_function=None): - func = self.to_py_function if to_py_function is None else to_py_function - assert func - if self.is_string or self.is_cpp_string: - if result_type.is_builtin_type: - result_type_name = result_type.name - if result_type_name in ('bytes', 'str', 'unicode'): - func = func.replace("Object", result_type_name.title(), 1) - elif result_type_name == 'bytearray': - func = func.replace("Object", "ByteArray", 1) - return '%s = %s(%s)' % ( - result_code, - func, - source_code or 'NULL') - - def from_py_call_code(self, source_code, result_code, error_pos, code, - from_py_function=None, error_condition=None): - return self._assign_from_py_code( - source_code, result_code, error_pos, code, from_py_function, error_condition) - - - -class PythranExpr(CType): - # Pythran object of a given type - - to_py_function = "__Pyx_pythran_to_python" - is_pythran_expr = True - writable = True - has_attributes = 1 - - def __init__(self, pythran_type, org_buffer=None): - self.org_buffer = org_buffer - self.pythran_type = pythran_type - self.name = self.pythran_type - self.cname = self.pythran_type - self.from_py_function = "from_python<%s>" % (self.pythran_type) - self.scope = None - - def declaration_code(self, entity_code, for_display=0, dll_linkage=None, pyrex=0): - assert not pyrex - return "%s %s" % (self.cname, entity_code) - - def attributes_known(self): - if self.scope is None: - from . import Symtab - # FIXME: fake C scope, might be better represented by a struct or C++ class scope - self.scope = scope = Symtab.CClassScope('', None, visibility="extern") - scope.parent_type = self - scope.directives = {} - scope.declare_var("shape", CPtrType(c_long_type), None, cname="_shape", is_cdef=True) - scope.declare_var("ndim", c_long_type, None, cname="value", is_cdef=True) - - return True - - def __eq__(self, other): - return isinstance(other, PythranExpr) and self.pythran_type == other.pythran_type - - def __ne__(self, other): - return not (isinstance(other, PythranExpr) and self.pythran_type == other.pythran_type) - - def __hash__(self): - return hash(self.pythran_type) - - -class CConstType(BaseType): - - is_const = 1 - - def __init__(self, const_base_type): - self.const_base_type = const_base_type - if const_base_type.has_attributes and const_base_type.scope is not None: - from . 
import Symtab - self.scope = Symtab.CConstScope(const_base_type.scope) - - def __repr__(self): - return "<CConstType %s>" % repr(self.const_base_type) - - def __str__(self): - return self.declaration_code("", for_display=1) - - def declaration_code(self, entity_code, - for_display = 0, dll_linkage = None, pyrex = 0): - if for_display or pyrex: - return "const " + self.const_base_type.declaration_code(entity_code, for_display, dll_linkage, pyrex) - else: - return self.const_base_type.declaration_code("const %s" % entity_code, for_display, dll_linkage, pyrex) - - def specialize(self, values): - base_type = self.const_base_type.specialize(values) - if base_type == self.const_base_type: - return self - else: - return CConstType(base_type) - - def deduce_template_params(self, actual): - return self.const_base_type.deduce_template_params(actual) - - def can_coerce_to_pyobject(self, env): - return self.const_base_type.can_coerce_to_pyobject(env) - - def can_coerce_from_pyobject(self, env): - return self.const_base_type.can_coerce_from_pyobject(env) - - def create_to_py_utility_code(self, env): - if self.const_base_type.create_to_py_utility_code(env): - self.to_py_function = self.const_base_type.to_py_function - return True - - def same_as_resolved_type(self, other_type): - if other_type.is_const: - return self.const_base_type.same_as_resolved_type(other_type.const_base_type) - # Accept const LHS <- non-const RHS. - return self.const_base_type.same_as_resolved_type(other_type) - - def __getattr__(self, name): - return getattr(self.const_base_type, name) - - -class FusedType(CType): - """ - Represents a Fused Type. All it needs to do is keep track of the types - it aggregates, as it will be replaced with its specific version wherever - needed. - - See http://wiki.cython.org/enhancements/fusedtypes - - types [PyrexType] is the list of types to be fused - name str the name of the ctypedef - """ - - is_fused = 1 - exception_check = 0 - - def __init__(self, types, name=None): - # Use list rather than set to preserve order (list should be short). - flattened_types = [] - for t in types: - if t.is_fused: - # recursively merge in subtypes - for subtype in t.types: - if subtype not in flattened_types: - flattened_types.append(subtype) - elif t not in flattened_types: - flattened_types.append(t) - self.types = flattened_types - self.name = name - - def declaration_code(self, entity_code, for_display = 0, - dll_linkage = None, pyrex = 0): - if pyrex or for_display: - return self.name - - raise Exception("This may never happen, please report a bug") - - def __repr__(self): - return 'FusedType(name=%r)' % self.name - - def specialize(self, values): - return values[self] - - def get_fused_types(self, result=None, seen=None): - if result is None: - return [self] - - if self not in seen: - result.append(self) - seen.add(self) - - -class CVoidType(CType): - # - # C "void" type - # - - is_void = 1 - to_py_function = "__Pyx_void_to_None" - - def __repr__(self): - return "<CVoidType>" - - def declaration_code(self, entity_code, - for_display = 0, dll_linkage = None, pyrex = 0): - if pyrex or for_display: - base_code = "void" - else: - base_code = public_decl("void", dll_linkage) - return self.base_declaration_code(base_code, entity_code) - - def is_complete(self): - return 0 - -class InvisibleVoidType(CVoidType): - # - # For use with C++ constructors and destructors return types. - # Acts like void, but does not print out a declaration. 
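The flattening performed in FusedType.__init__ above, restated as a standalone sketch (nested Python lists stand in for nested fused types):

# Nested fused types are merged recursively into one ordered, duplicate-free
# list; a list is used instead of a set to keep the order stable.
def flatten_fused(types):
    flat = []
    for t in types:
        members = flatten_fused(t) if isinstance(t, list) else [t]
        for member in members:
            if member not in flat:
                flat.append(member)
    return flat

assert flatten_fused(['int', ['float', 'int'], 'double']) == ['int', 'float', 'double']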
- # - def declaration_code(self, entity_code, - for_display = 0, dll_linkage = None, pyrex = 0): - if pyrex or for_display: - base_code = "[void]" - else: - base_code = public_decl("", dll_linkage) - return self.base_declaration_code(base_code, entity_code) - - -class CNumericType(CType): - # - # Base class for all C numeric types. - # - # rank integer Relative size - # signed integer 0 = unsigned, 1 = unspecified, 2 = explicitly signed - # - - is_numeric = 1 - default_value = "0" - has_attributes = True - scope = None - - sign_words = ("unsigned ", "", "signed ") - - def __init__(self, rank, signed = 1): - self.rank = rank - if rank > 0 and signed == SIGNED: - # Signed is meaningless for anything but char, and complicates - # type promotion. - signed = 1 - self.signed = signed - - def sign_and_name(self): - s = self.sign_words[self.signed] - n = rank_to_type_name[self.rank] - return s + n - - def __repr__(self): - return "<CNumericType %s>" % self.sign_and_name() - - def declaration_code(self, entity_code, - for_display = 0, dll_linkage = None, pyrex = 0): - type_name = self.sign_and_name() - if pyrex or for_display: - base_code = type_name.replace('PY_LONG_LONG', 'long long') - else: - base_code = public_decl(type_name, dll_linkage) - return self.base_declaration_code(base_code, entity_code) - - def attributes_known(self): - if self.scope is None: - from . import Symtab - self.scope = scope = Symtab.CClassScope( - '', - None, - visibility="extern") - scope.parent_type = self - scope.directives = {} - scope.declare_cfunction( - "conjugate", - CFuncType(self, [CFuncTypeArg("self", self, None)], nogil=True), - pos=None, - defining=1, - cname=" ") - return True - - def __lt__(self, other): - """Sort based on rank, preferring signed over unsigned""" - if other.is_numeric: - return self.rank > other.rank and self.signed >= other.signed - - # Prefer numeric types over others - return True - - def py_type_name(self): - if self.rank <= 4: - return "(int, long)" - return "float" - - -class ForbidUseClass: - def __repr__(self): - raise RuntimeError() - def __str__(self): - raise RuntimeError() -ForbidUse = ForbidUseClass() - - -class CIntLike(object): - """Mixin for shared behaviour of C integers and enums. 
- """ - to_py_function = None - from_py_function = None - to_pyunicode_utility = None - default_format_spec = 'd' - - def can_coerce_to_pyobject(self, env): - return True - - def can_coerce_from_pyobject(self, env): - return True - - def create_to_py_utility_code(self, env): - if type(self).to_py_function is None: - self.to_py_function = "__Pyx_PyInt_From_" + self.specialization_name() - env.use_utility_code(TempitaUtilityCode.load_cached( - "CIntToPy", "TypeConversion.c", - context={"TYPE": self.empty_declaration_code(), - "TO_PY_FUNCTION": self.to_py_function})) - return True - - def create_from_py_utility_code(self, env): - if type(self).from_py_function is None: - self.from_py_function = "__Pyx_PyInt_As_" + self.specialization_name() - env.use_utility_code(TempitaUtilityCode.load_cached( - "CIntFromPy", "TypeConversion.c", - context={"TYPE": self.empty_declaration_code(), - "FROM_PY_FUNCTION": self.from_py_function})) - return True - - @staticmethod - def _parse_format(format_spec): - padding = ' ' - if not format_spec: - return ('d', 0, padding) - format_type = format_spec[-1] - if format_type in ('o', 'd', 'x', 'X'): - prefix = format_spec[:-1] - elif format_type.isdigit(): - format_type = 'd' - prefix = format_spec - else: - return (None, 0, padding) - if not prefix: - return (format_type, 0, padding) - if prefix[0] == '-': - prefix = prefix[1:] - if prefix and prefix[0] == '0': - padding = '0' - prefix = prefix.lstrip('0') - if prefix.isdigit(): - return (format_type, int(prefix), padding) - return (None, 0, padding) - - def can_coerce_to_pystring(self, env, format_spec=None): - format_type, width, padding = self._parse_format(format_spec) - return format_type is not None and width <= 2**30 - - def convert_to_pystring(self, cvalue, code, format_spec=None): - if self.to_pyunicode_utility is None: - utility_code_name = "__Pyx_PyUnicode_From_" + self.specialization_name() - to_pyunicode_utility = TempitaUtilityCode.load_cached( - "CIntToPyUnicode", "TypeConversion.c", - context={"TYPE": self.empty_declaration_code(), - "TO_PY_FUNCTION": utility_code_name}) - self.to_pyunicode_utility = (utility_code_name, to_pyunicode_utility) - else: - utility_code_name, to_pyunicode_utility = self.to_pyunicode_utility - code.globalstate.use_utility_code(to_pyunicode_utility) - format_type, width, padding_char = self._parse_format(format_spec) - return "%s(%s, %d, '%s', '%s')" % (utility_code_name, cvalue, width, padding_char, format_type) - - -class CIntType(CIntLike, CNumericType): - - is_int = 1 - typedef_flag = 0 - exception_value = -1 - - def get_to_py_type_conversion(self): - if self.rank < list(rank_to_type_name).index('int'): - # This assumes sizeof(short) < sizeof(int) - return "PyInt_FromLong" - else: - # Py{Int|Long}_From[Unsigned]Long[Long] - Prefix = "Int" - SignWord = "" - TypeName = "Long" - if not self.signed: - Prefix = "Long" - SignWord = "Unsigned" - if self.rank >= list(rank_to_type_name).index('PY_LONG_LONG'): - Prefix = "Long" - TypeName = "LongLong" - return "Py%s_From%s%s" % (Prefix, SignWord, TypeName) - - def assignable_from_resolved_type(self, src_type): - return src_type.is_int or src_type.is_enum or src_type is error_type - - def invalid_value(self): - if rank_to_type_name[int(self.rank)] == 'char': - return "'?'" - else: - # We do not really know the size of the type, so return - # a 32-bit literal and rely on casting to final type. It will - # be negative for signed ints, which is good. 
- return "0xbad0bad0" - - def overflow_check_binop(self, binop, env, const_rhs=False): - env.use_utility_code(UtilityCode.load("Common", "Overflow.c")) - type = self.empty_declaration_code() - name = self.specialization_name() - if binop == "lshift": - env.use_utility_code(TempitaUtilityCode.load_cached( - "LeftShift", "Overflow.c", - context={'TYPE': type, 'NAME': name, 'SIGNED': self.signed})) - else: - if const_rhs: - binop += "_const" - if type in ('int', 'long', 'long long'): - env.use_utility_code(TempitaUtilityCode.load_cached( - "BaseCaseSigned", "Overflow.c", - context={'INT': type, 'NAME': name})) - elif type in ('unsigned int', 'unsigned long', 'unsigned long long'): - env.use_utility_code(TempitaUtilityCode.load_cached( - "BaseCaseUnsigned", "Overflow.c", - context={'UINT': type, 'NAME': name})) - elif self.rank <= 1: - # sizeof(short) < sizeof(int) - return "__Pyx_%s_%s_no_overflow" % (binop, name) - else: - _load_overflow_base(env) - env.use_utility_code(TempitaUtilityCode.load_cached( - "SizeCheck", "Overflow.c", - context={'TYPE': type, 'NAME': name})) - env.use_utility_code(TempitaUtilityCode.load_cached( - "Binop", "Overflow.c", - context={'TYPE': type, 'NAME': name, 'BINOP': binop})) - return "__Pyx_%s_%s_checking_overflow" % (binop, name) - - -def _load_overflow_base(env): - env.use_utility_code(UtilityCode.load("Common", "Overflow.c")) - for type in ('int', 'long', 'long long'): - env.use_utility_code(TempitaUtilityCode.load_cached( - "BaseCaseSigned", "Overflow.c", - context={'INT': type, 'NAME': type.replace(' ', '_')})) - for type in ('unsigned int', 'unsigned long', 'unsigned long long'): - env.use_utility_code(TempitaUtilityCode.load_cached( - "BaseCaseUnsigned", "Overflow.c", - context={'UINT': type, 'NAME': type.replace(' ', '_')})) - - -class CAnonEnumType(CIntType): - - is_enum = 1 - - def sign_and_name(self): - return 'int' - - -class CReturnCodeType(CIntType): - - to_py_function = "__Pyx_Owned_Py_None" - - is_returncode = True - exception_check = False - default_format_spec = '' - - def can_coerce_to_pystring(self, env, format_spec=None): - return not format_spec - - def convert_to_pystring(self, cvalue, code, format_spec=None): - return "__Pyx_NewRef(%s)" % code.globalstate.get_py_string_const(StringEncoding.EncodedString("None")).cname - - -class CBIntType(CIntType): - - to_py_function = "__Pyx_PyBool_FromLong" - from_py_function = "__Pyx_PyObject_IsTrue" - exception_check = 1 # for C++ bool - default_format_spec = '' - - def can_coerce_to_pystring(self, env, format_spec=None): - return not format_spec or super(CBIntType, self).can_coerce_to_pystring(env, format_spec) - - def convert_to_pystring(self, cvalue, code, format_spec=None): - if format_spec: - return super(CBIntType, self).convert_to_pystring(cvalue, code, format_spec) - # NOTE: no caching here as the string constant cnames depend on the current module - utility_code_name = "__Pyx_PyUnicode_FromBInt_" + self.specialization_name() - to_pyunicode_utility = TempitaUtilityCode.load_cached( - "CBIntToPyUnicode", "TypeConversion.c", context={ - "TRUE_CONST": code.globalstate.get_py_string_const(StringEncoding.EncodedString("True")).cname, - "FALSE_CONST": code.globalstate.get_py_string_const(StringEncoding.EncodedString("False")).cname, - "TO_PY_FUNCTION": utility_code_name, - }) - code.globalstate.use_utility_code(to_pyunicode_utility) - return "%s(%s)" % (utility_code_name, cvalue) - - def declaration_code(self, entity_code, - for_display = 0, dll_linkage = None, pyrex = 0): - if for_display: - 
base_code = 'bool' - elif pyrex: - base_code = 'bint' - else: - base_code = public_decl('int', dll_linkage) - return self.base_declaration_code(base_code, entity_code) - - def __repr__(self): - return "<CNumericType bint>" - - def __str__(self): - return 'bint' - - def py_type_name(self): - return "bool" - - -class CPyUCS4IntType(CIntType): - # Py_UCS4 - - is_unicode_char = True - - # Py_UCS4 coerces from and to single character unicode strings (or - # at most two characters on 16bit Unicode builds), but we also - # allow Python integers as input. The value range for Py_UCS4 - # is 0..1114111, which is checked when converting from an integer - # value. - - to_py_function = "PyUnicode_FromOrdinal" - from_py_function = "__Pyx_PyObject_AsPy_UCS4" - - def can_coerce_to_pystring(self, env, format_spec=None): - return False # does the right thing anyway - - def create_from_py_utility_code(self, env): - env.use_utility_code(UtilityCode.load_cached("ObjectAsUCS4", "TypeConversion.c")) - return True - - def sign_and_name(self): - return "Py_UCS4" - - -class CPyUnicodeIntType(CIntType): - # Py_UNICODE - - is_unicode_char = True - - # Py_UNICODE coerces from and to single character unicode strings, - # but we also allow Python integers as input. The value range for - # Py_UNICODE is 0..1114111, which is checked when converting from - # an integer value. - - to_py_function = "PyUnicode_FromOrdinal" - from_py_function = "__Pyx_PyObject_AsPy_UNICODE" - - def can_coerce_to_pystring(self, env, format_spec=None): - return False # does the right thing anyway - - def create_from_py_utility_code(self, env): - env.use_utility_code(UtilityCode.load_cached("ObjectAsPyUnicode", "TypeConversion.c")) - return True - - def sign_and_name(self): - return "Py_UNICODE" - - -class CPyHashTType(CIntType): - - to_py_function = "__Pyx_PyInt_FromHash_t" - from_py_function = "__Pyx_PyInt_AsHash_t" - - def sign_and_name(self): - return "Py_hash_t" - -class CPySSizeTType(CIntType): - - to_py_function = "PyInt_FromSsize_t" - from_py_function = "__Pyx_PyIndex_AsSsize_t" - - def sign_and_name(self): - return "Py_ssize_t" - -class CSSizeTType(CIntType): - - to_py_function = "PyInt_FromSsize_t" - from_py_function = "PyInt_AsSsize_t" - - def sign_and_name(self): - return "Py_ssize_t" - -class CSizeTType(CIntType): - - to_py_function = "__Pyx_PyInt_FromSize_t" - - def sign_and_name(self): - return "size_t" - -class CPtrdiffTType(CIntType): - - def sign_and_name(self): - return "ptrdiff_t" - - -class CFloatType(CNumericType): - - is_float = 1 - to_py_function = "PyFloat_FromDouble" - from_py_function = "__pyx_PyFloat_AsDouble" - - exception_value = -1 - - def __init__(self, rank, math_h_modifier = ''): - CNumericType.__init__(self, rank, 1) - self.math_h_modifier = math_h_modifier - if rank == RANK_FLOAT: - self.from_py_function = "__pyx_PyFloat_AsFloat" - - def assignable_from_resolved_type(self, src_type): - return (src_type.is_numeric and not src_type.is_complex) or src_type is error_type - - def invalid_value(self): - return Naming.PYX_NAN - -class CComplexType(CNumericType): - - is_complex = 1 - to_py_function = "__pyx_PyComplex_FromComplex" - has_attributes = 1 - scope = None - - def __init__(self, real_type): - while real_type.is_typedef and not real_type.typedef_is_external: - real_type = real_type.typedef_base_type - self.funcsuffix = "_%s" % real_type.specialization_name() - if real_type.is_float: - self.math_h_modifier = real_type.math_h_modifier - else: - self.math_h_modifier = "_UNUSED" - - self.real_type = real_type - 
CNumericType.__init__(self, real_type.rank + 0.5, real_type.signed) - self.binops = {} - self.from_parts = "%s_from_parts" % self.specialization_name() - self.default_value = "%s(0, 0)" % self.from_parts - - def __eq__(self, other): - if isinstance(self, CComplexType) and isinstance(other, CComplexType): - return self.real_type == other.real_type - else: - return False - - def __ne__(self, other): - if isinstance(self, CComplexType) and isinstance(other, CComplexType): - return self.real_type != other.real_type - else: - return True - - def __lt__(self, other): - if isinstance(self, CComplexType) and isinstance(other, CComplexType): - return self.real_type < other.real_type - else: - # this is arbitrary, but it makes sure we always have - # *some* kind of order - return False - - def __hash__(self): - return ~hash(self.real_type) - - def declaration_code(self, entity_code, - for_display = 0, dll_linkage = None, pyrex = 0): - if pyrex or for_display: - real_code = self.real_type.declaration_code("", for_display, dll_linkage, pyrex) - base_code = "%s complex" % real_code - else: - base_code = public_decl(self.sign_and_name(), dll_linkage) - return self.base_declaration_code(base_code, entity_code) - - def sign_and_name(self): - real_type_name = self.real_type.specialization_name() - real_type_name = real_type_name.replace('long__double','long_double') - real_type_name = real_type_name.replace('PY_LONG_LONG','long_long') - return Naming.type_prefix + real_type_name + "_complex" - - def assignable_from(self, src_type): - # Temporary hack/feature disabling, see #441 - if (not src_type.is_complex and src_type.is_numeric and src_type.is_typedef - and src_type.typedef_is_external): - return False - elif src_type.is_pyobject: - return True - else: - return super(CComplexType, self).assignable_from(src_type) - - def assignable_from_resolved_type(self, src_type): - return (src_type.is_complex and self.real_type.assignable_from_resolved_type(src_type.real_type) - or src_type.is_numeric and self.real_type.assignable_from_resolved_type(src_type) - or src_type is error_type) - - def attributes_known(self): - if self.scope is None: - from . 
import Symtab - self.scope = scope = Symtab.CClassScope( - '', - None, - visibility="extern") - scope.parent_type = self - scope.directives = {} - scope.declare_var("real", self.real_type, None, cname="real", is_cdef=True) - scope.declare_var("imag", self.real_type, None, cname="imag", is_cdef=True) - scope.declare_cfunction( - "conjugate", - CFuncType(self, [CFuncTypeArg("self", self, None)], nogil=True), - pos=None, - defining=1, - cname="__Pyx_c_conj%s" % self.funcsuffix) - - return True - - def _utility_code_context(self): - return { - 'type': self.empty_declaration_code(), - 'type_name': self.specialization_name(), - 'real_type': self.real_type.empty_declaration_code(), - 'func_suffix': self.funcsuffix, - 'm': self.math_h_modifier, - 'is_float': int(self.real_type.is_float) - } - - def create_declaration_utility_code(self, env): - # This must always be run, because a single CComplexType instance can be shared - # across multiple compilations (the one created in the module scope) - env.use_utility_code(UtilityCode.load_cached('Header', 'Complex.c')) - env.use_utility_code(UtilityCode.load_cached('RealImag', 'Complex.c')) - env.use_utility_code(TempitaUtilityCode.load_cached( - 'Declarations', 'Complex.c', self._utility_code_context())) - env.use_utility_code(TempitaUtilityCode.load_cached( - 'Arithmetic', 'Complex.c', self._utility_code_context())) - return True - - def can_coerce_to_pyobject(self, env): - return True - - def can_coerce_from_pyobject(self, env): - return True - - def create_to_py_utility_code(self, env): - env.use_utility_code(UtilityCode.load_cached('ToPy', 'Complex.c')) - return True - - def create_from_py_utility_code(self, env): - env.use_utility_code(TempitaUtilityCode.load_cached( - 'FromPy', 'Complex.c', self._utility_code_context())) - self.from_py_function = "__Pyx_PyComplex_As_" + self.specialization_name() - return True - - def lookup_op(self, nargs, op): - try: - return self.binops[nargs, op] - except KeyError: - pass - try: - op_name = complex_ops[nargs, op] - self.binops[nargs, op] = func_name = "__Pyx_c_%s%s" % (op_name, self.funcsuffix) - return func_name - except KeyError: - return None - - def unary_op(self, op): - return self.lookup_op(1, op) - - def binary_op(self, op): - return self.lookup_op(2, op) - - def py_type_name(self): - return "complex" - - def cast_code(self, expr_code): - return expr_code - -complex_ops = { - (1, '-'): 'neg', - (1, 'zero'): 'is_zero', - (2, '+'): 'sum', - (2, '-'): 'diff', - (2, '*'): 'prod', - (2, '/'): 'quot', - (2, '**'): 'pow', - (2, '=='): 'eq', -} - - -class CPyTSSTType(CType): - # - # PEP-539 "Py_tss_t" type - # - - declaration_value = "Py_tss_NEEDS_INIT" - - def __repr__(self): - return "<Py_tss_t>" - - def declaration_code(self, entity_code, - for_display=0, dll_linkage=None, pyrex=0): - if pyrex or for_display: - base_code = "Py_tss_t" - else: - base_code = public_decl("Py_tss_t", dll_linkage) - return self.base_declaration_code(base_code, entity_code) - - -class CPointerBaseType(CType): - # common base type for pointer/array types - # - # base_type CType Reference type - - subtypes = ['base_type'] - - def __init__(self, base_type): - self.base_type = base_type - if base_type.is_const: - base_type = base_type.const_base_type - for char_type in (c_char_type, c_uchar_type, c_schar_type): - if base_type.same_as(char_type): - self.is_string = 1 - break - else: - if base_type.same_as(c_py_unicode_type): - self.is_pyunicode_ptr = 1 - - if self.is_string and not base_type.is_error: - if base_type.signed == 2: - 
self.to_py_function = "__Pyx_PyObject_FromCString" - if self.is_ptr: - self.from_py_function = "__Pyx_PyObject_As%sSString" - elif base_type.signed: - self.to_py_function = "__Pyx_PyObject_FromString" - if self.is_ptr: - self.from_py_function = "__Pyx_PyObject_As%sString" - else: - self.to_py_function = "__Pyx_PyObject_FromCString" - if self.is_ptr: - self.from_py_function = "__Pyx_PyObject_As%sUString" - if self.is_ptr: - self.from_py_function %= '' if self.base_type.is_const else 'Writable' - self.exception_value = "NULL" - elif self.is_pyunicode_ptr and not base_type.is_error: - self.to_py_function = "__Pyx_PyUnicode_FromUnicode" - if self.is_ptr: - self.from_py_function = "__Pyx_PyUnicode_AsUnicode" - self.exception_value = "NULL" - - def py_type_name(self): - if self.is_string: - return "bytes" - elif self.is_pyunicode_ptr: - return "unicode" - else: - return super(CPointerBaseType, self).py_type_name() - - def literal_code(self, value): - if self.is_string: - assert isinstance(value, str) - return '"%s"' % StringEncoding.escape_byte_string(value) - - -class CArrayType(CPointerBaseType): - # base_type CType Element type - # size integer or None Number of elements - - is_array = 1 - to_tuple_function = None - - def __init__(self, base_type, size): - super(CArrayType, self).__init__(base_type) - self.size = size - - def __eq__(self, other): - if isinstance(other, CType) and other.is_array and self.size == other.size: - return self.base_type.same_as(other.base_type) - return False - - def __hash__(self): - return hash(self.base_type) + 28 # arbitrarily chosen offset - - def __repr__(self): - return "<CArrayType %s %s>" % (self.size, repr(self.base_type)) - - def same_as_resolved_type(self, other_type): - return ((other_type.is_array and - self.base_type.same_as(other_type.base_type)) - or other_type is error_type) - - def assignable_from_resolved_type(self, src_type): - # C arrays are assigned by value, either Python containers or C arrays/pointers - if src_type.is_pyobject: - return True - if src_type.is_ptr or src_type.is_array: - return self.base_type.assignable_from(src_type.base_type) - return False - - def element_ptr_type(self): - return c_ptr_type(self.base_type) - - def declaration_code(self, entity_code, - for_display = 0, dll_linkage = None, pyrex = 0): - if self.size is not None: - dimension_code = self.size - else: - dimension_code = "" - if entity_code.startswith("*"): - entity_code = "(%s)" % entity_code - return self.base_type.declaration_code( - "%s[%s]" % (entity_code, dimension_code), - for_display, dll_linkage, pyrex) - - def as_argument_type(self): - return c_ptr_type(self.base_type) - - def is_complete(self): - return self.size is not None - - def specialize(self, values): - base_type = self.base_type.specialize(values) - if base_type == self.base_type: - return self - else: - return CArrayType(base_type, self.size) - - def deduce_template_params(self, actual): - if isinstance(actual, CArrayType): - return self.base_type.deduce_template_params(actual.base_type) - else: - return {} - - def can_coerce_to_pyobject(self, env): - return self.base_type.can_coerce_to_pyobject(env) - - def can_coerce_from_pyobject(self, env): - return self.base_type.can_coerce_from_pyobject(env) - - def create_to_py_utility_code(self, env): - if self.to_py_function is not None: - return self.to_py_function - if not self.base_type.create_to_py_utility_code(env): - return False - - safe_typename = self.base_type.specialization_name() - to_py_function = "__Pyx_carray_to_py_%s" % safe_typename 
- to_tuple_function = "__Pyx_carray_to_tuple_%s" % safe_typename - - from .UtilityCode import CythonUtilityCode - context = { - 'cname': to_py_function, - 'to_tuple_cname': to_tuple_function, - 'base_type': self.base_type, - } - env.use_utility_code(CythonUtilityCode.load( - "carray.to_py", "CConvert.pyx", - outer_module_scope=env.global_scope(), # need access to types declared in module - context=context, compiler_directives=dict(env.global_scope().directives))) - self.to_tuple_function = to_tuple_function - self.to_py_function = to_py_function - return True - - def to_py_call_code(self, source_code, result_code, result_type, to_py_function=None): - func = self.to_py_function if to_py_function is None else to_py_function - if self.is_string or self.is_pyunicode_ptr: - return '%s = %s(%s)' % ( - result_code, - func, - source_code) - target_is_tuple = result_type.is_builtin_type and result_type.name == 'tuple' - return '%s = %s(%s, %s)' % ( - result_code, - self.to_tuple_function if target_is_tuple else func, - source_code, - self.size) - - def create_from_py_utility_code(self, env): - if self.from_py_function is not None: - return self.from_py_function - if not self.base_type.create_from_py_utility_code(env): - return False - - from_py_function = "__Pyx_carray_from_py_%s" % self.base_type.specialization_name() - - from .UtilityCode import CythonUtilityCode - context = { - 'cname': from_py_function, - 'base_type': self.base_type, - } - env.use_utility_code(CythonUtilityCode.load( - "carray.from_py", "CConvert.pyx", - outer_module_scope=env.global_scope(), # need access to types declared in module - context=context, compiler_directives=dict(env.global_scope().directives))) - self.from_py_function = from_py_function - return True - - def from_py_call_code(self, source_code, result_code, error_pos, code, - from_py_function=None, error_condition=None): - assert not error_condition, '%s: %s' % (error_pos, error_condition) - call_code = "%s(%s, %s, %s)" % ( - from_py_function or self.from_py_function, - source_code, result_code, self.size) - return code.error_goto_if_neg(call_code, error_pos) - - -class CPtrType(CPointerBaseType): - # base_type CType Reference type - - is_ptr = 1 - default_value = "0" - - def __hash__(self): - return hash(self.base_type) + 27 # arbitrarily chosen offset - - def __eq__(self, other): - if isinstance(other, CType) and other.is_ptr: - return self.base_type.same_as(other.base_type) - return False - - def __ne__(self, other): - return not (self == other) - - def __repr__(self): - return "<CPtrType %s>" % repr(self.base_type) - - def same_as_resolved_type(self, other_type): - return ((other_type.is_ptr and - self.base_type.same_as(other_type.base_type)) - or other_type is error_type) - - def declaration_code(self, entity_code, - for_display = 0, dll_linkage = None, pyrex = 0): - #print "CPtrType.declaration_code: pointer to", self.base_type ### - return self.base_type.declaration_code( - "*%s" % entity_code, - for_display, dll_linkage, pyrex) - - def assignable_from_resolved_type(self, other_type): - if other_type is error_type: - return 1 - if other_type.is_null_ptr: - return 1 - if self.base_type.is_const: - self = CPtrType(self.base_type.const_base_type) - if self.base_type.is_cfunction: - if other_type.is_ptr: - other_type = other_type.base_type.resolve() - if other_type.is_cfunction: - return self.base_type.pointer_assignable_from_resolved_type(other_type) - else: - return 0 - if (self.base_type.is_cpp_class and other_type.is_ptr - and 
other_type.base_type.is_cpp_class and other_type.base_type.is_subclass(self.base_type)): - return 1 - if other_type.is_array or other_type.is_ptr: - return self.base_type.is_void or self.base_type.same_as(other_type.base_type) - return 0 - - def specialize(self, values): - base_type = self.base_type.specialize(values) - if base_type == self.base_type: - return self - else: - return CPtrType(base_type) - - def deduce_template_params(self, actual): - if isinstance(actual, CPtrType): - return self.base_type.deduce_template_params(actual.base_type) - else: - return {} - - def invalid_value(self): - return "1" - - def find_cpp_operation_type(self, operator, operand_type=None): - if self.base_type.is_cpp_class: - return self.base_type.find_cpp_operation_type(operator, operand_type) - return None - - -class CNullPtrType(CPtrType): - - is_null_ptr = 1 - - -class CReferenceType(BaseType): - - is_reference = 1 - is_fake_reference = 0 - - def __init__(self, base_type): - self.ref_base_type = base_type - - def __repr__(self): - return "<CReferenceType %s>" % repr(self.ref_base_type) - - def __str__(self): - return "%s &" % self.ref_base_type - - def declaration_code(self, entity_code, - for_display = 0, dll_linkage = None, pyrex = 0): - #print "CReferenceType.declaration_code: pointer to", self.base_type ### - return self.ref_base_type.declaration_code( - "&%s" % entity_code, - for_display, dll_linkage, pyrex) - - def specialize(self, values): - base_type = self.ref_base_type.specialize(values) - if base_type == self.ref_base_type: - return self - else: - return type(self)(base_type) - - def deduce_template_params(self, actual): - return self.ref_base_type.deduce_template_params(actual) - - def __getattr__(self, name): - return getattr(self.ref_base_type, name) - - -class CFakeReferenceType(CReferenceType): - - is_fake_reference = 1 - - def __repr__(self): - return "<CFakeReferenceType %s>" % repr(self.ref_base_type) - - def __str__(self): - return "%s [&]" % self.ref_base_type - - def declaration_code(self, entity_code, - for_display = 0, dll_linkage = None, pyrex = 0): - #print "CReferenceType.declaration_code: pointer to", self.base_type ### - return "__Pyx_FakeReference<%s> %s" % (self.ref_base_type.empty_declaration_code(), entity_code) - - -class CFuncType(CType): - # return_type CType - # args [CFuncTypeArg] - # has_varargs boolean - # exception_value string - # exception_check boolean True if PyErr_Occurred check needed - # calling_convention string Function calling convention - # nogil boolean Can be called without gil - # with_gil boolean Acquire gil around function body - # templates [string] or None - # cached_specialized_types [CFuncType] cached specialized versions of the CFuncType if defined in a pxd - # from_fused boolean Indicates whether this is a specialized - # C function - # is_strict_signature boolean function refuses to accept coerced arguments - # (used for optimisation overrides) - # is_const_method boolean - # is_static_method boolean - - is_cfunction = 1 - original_sig = None - cached_specialized_types = None - from_fused = False - is_const_method = False - - subtypes = ['return_type', 'args'] - - def __init__(self, return_type, args, has_varargs = 0, - exception_value = None, exception_check = 0, calling_convention = "", - nogil = 0, with_gil = 0, is_overridable = 0, optional_arg_count = 0, - is_const_method = False, is_static_method=False, - templates = None, is_strict_signature = False): - self.return_type = return_type - self.args = args - self.has_varargs = has_varargs 
- self.optional_arg_count = optional_arg_count - self.exception_value = exception_value - self.exception_check = exception_check - self.calling_convention = calling_convention - self.nogil = nogil - self.with_gil = with_gil - self.is_overridable = is_overridable - self.is_const_method = is_const_method - self.is_static_method = is_static_method - self.templates = templates - self.is_strict_signature = is_strict_signature - - def __repr__(self): - arg_reprs = list(map(repr, self.args)) - if self.has_varargs: - arg_reprs.append("...") - if self.exception_value: - except_clause = " %r" % self.exception_value - else: - except_clause = "" - if self.exception_check: - except_clause += "?" - return "<CFuncType %s %s[%s]%s>" % ( - repr(self.return_type), - self.calling_convention_prefix(), - ",".join(arg_reprs), - except_clause) - - def with_with_gil(self, with_gil): - if with_gil == self.with_gil: - return self - else: - return CFuncType( - self.return_type, self.args, self.has_varargs, - self.exception_value, self.exception_check, - self.calling_convention, self.nogil, - with_gil, - self.is_overridable, self.optional_arg_count, - self.is_const_method, self.is_static_method, - self.templates, self.is_strict_signature) - - def calling_convention_prefix(self): - cc = self.calling_convention - if cc: - return cc + " " - else: - return "" - - def as_argument_type(self): - return c_ptr_type(self) - - def same_c_signature_as(self, other_type, as_cmethod = 0): - return self.same_c_signature_as_resolved_type( - other_type.resolve(), as_cmethod) - - def same_c_signature_as_resolved_type(self, other_type, as_cmethod=False, as_pxd_definition=False, - exact_semantics=True): - # If 'exact_semantics' is false, allow any equivalent C signatures - # if the Cython semantics are compatible, i.e. the same or wider for 'other_type'. - - #print "CFuncType.same_c_signature_as_resolved_type:", \ - # self, other_type, "as_cmethod =", as_cmethod ### - if other_type is error_type: - return 1 - if not other_type.is_cfunction: - return 0 - if self.is_overridable != other_type.is_overridable: - return 0 - nargs = len(self.args) - if nargs != len(other_type.args): - return 0 - # When comparing C method signatures, the first argument - # is exempt from compatibility checking (the proper check - # is performed elsewhere). - for i in range(as_cmethod, nargs): - if not self.args[i].type.same_as(other_type.args[i].type): - return 0 - if self.has_varargs != other_type.has_varargs: - return 0 - if self.optional_arg_count != other_type.optional_arg_count: - return 0 - if as_pxd_definition: - # A narrowing of the return type declared in the pxd is allowed. 
- if not self.return_type.subtype_of_resolved_type(other_type.return_type): - return 0 - else: - if not self.return_type.same_as(other_type.return_type): - return 0 - if not self.same_calling_convention_as(other_type): - return 0 - if exact_semantics: - if self.exception_check != other_type.exception_check: - return 0 - if not self._same_exception_value(other_type.exception_value): - return 0 - elif not self._is_exception_compatible_with(other_type): - return 0 - return 1 - - def _same_exception_value(self, other_exc_value): - if self.exception_value == other_exc_value: - return 1 - if self.exception_check != '+': - return 0 - if not self.exception_value or not other_exc_value: - return 0 - if self.exception_value.type != other_exc_value.type: - return 0 - if self.exception_value.entry and other_exc_value.entry: - if self.exception_value.entry.cname != other_exc_value.entry.cname: - return 0 - if self.exception_value.name != other_exc_value.name: - return 0 - return 1 - - def compatible_signature_with(self, other_type, as_cmethod = 0): - return self.compatible_signature_with_resolved_type(other_type.resolve(), as_cmethod) - - def compatible_signature_with_resolved_type(self, other_type, as_cmethod): - #print "CFuncType.same_c_signature_as_resolved_type:", \ - # self, other_type, "as_cmethod =", as_cmethod ### - if other_type is error_type: - return 1 - if not other_type.is_cfunction: - return 0 - if not self.is_overridable and other_type.is_overridable: - return 0 - nargs = len(self.args) - if nargs - self.optional_arg_count != len(other_type.args) - other_type.optional_arg_count: - return 0 - if self.optional_arg_count < other_type.optional_arg_count: - return 0 - # When comparing C method signatures, the first argument - # is exempt from compatibility checking (the proper check - # is performed elsewhere). 
- for i in range(as_cmethod, len(other_type.args)): - if not self.args[i].type.same_as( - other_type.args[i].type): - return 0 - if self.has_varargs != other_type.has_varargs: - return 0 - if not self.return_type.subtype_of_resolved_type(other_type.return_type): - return 0 - if not self.same_calling_convention_as(other_type): - return 0 - if self.nogil != other_type.nogil: - return 0 - if not self._is_exception_compatible_with(other_type): - return 0 - self.original_sig = other_type.original_sig or other_type - return 1 - - def _is_exception_compatible_with(self, other_type): - # narrower exception checks are ok, but prevent mismatches - if self.exception_check == '+' and other_type.exception_check != '+': - # must catch C++ exceptions if we raise them - return 0 - if not other_type.exception_check or other_type.exception_value is not None: - # if other does not *always* check exceptions, self must comply - if not self._same_exception_value(other_type.exception_value): - return 0 - if self.exception_check and self.exception_check != other_type.exception_check: - # a redundant exception check doesn't make functions incompatible, but a missing one does - return 0 - return 1 - - def narrower_c_signature_than(self, other_type, as_cmethod = 0): - return self.narrower_c_signature_than_resolved_type(other_type.resolve(), as_cmethod) - - def narrower_c_signature_than_resolved_type(self, other_type, as_cmethod): - if other_type is error_type: - return 1 - if not other_type.is_cfunction: - return 0 - nargs = len(self.args) - if nargs != len(other_type.args): - return 0 - for i in range(as_cmethod, nargs): - if not self.args[i].type.subtype_of_resolved_type(other_type.args[i].type): - return 0 - else: - self.args[i].needs_type_test = other_type.args[i].needs_type_test \ - or not self.args[i].type.same_as(other_type.args[i].type) - if self.has_varargs != other_type.has_varargs: - return 0 - if self.optional_arg_count != other_type.optional_arg_count: - return 0 - if not self.return_type.subtype_of_resolved_type(other_type.return_type): - return 0 - if not self.exception_check and other_type.exception_check: - # a redundant exception check doesn't make functions incompatible, but a missing one does - return 0 - if not self._same_exception_value(other_type.exception_value): - return 0 - return 1 - - def same_calling_convention_as(self, other): - ## XXX Under discussion ... - ## callspec_words = ("__stdcall", "__cdecl", "__fastcall") - ## cs1 = self.calling_convention - ## cs2 = other.calling_convention - ## if (cs1 in callspec_words or - ## cs2 in callspec_words): - ## return cs1 == cs2 - ## else: - ## return True - sc1 = self.calling_convention == '__stdcall' - sc2 = other.calling_convention == '__stdcall' - return sc1 == sc2 - - def same_as_resolved_type(self, other_type, as_cmethod=False): - return self.same_c_signature_as_resolved_type(other_type, as_cmethod=as_cmethod) \ - and self.nogil == other_type.nogil - - def pointer_assignable_from_resolved_type(self, rhs_type): - # Accept compatible exception/nogil declarations for the RHS. 
- if rhs_type is error_type: - return 1 - if not rhs_type.is_cfunction: - return 0 - return rhs_type.same_c_signature_as_resolved_type(self, exact_semantics=False) \ - and not (self.nogil and not rhs_type.nogil) - - def declaration_code(self, entity_code, - for_display = 0, dll_linkage = None, pyrex = 0, - with_calling_convention = 1): - arg_decl_list = [] - for arg in self.args[:len(self.args)-self.optional_arg_count]: - arg_decl_list.append( - arg.type.declaration_code("", for_display, pyrex = pyrex)) - if self.is_overridable: - arg_decl_list.append("int %s" % Naming.skip_dispatch_cname) - if self.optional_arg_count: - arg_decl_list.append(self.op_arg_struct.declaration_code(Naming.optional_args_cname)) - if self.has_varargs: - arg_decl_list.append("...") - arg_decl_code = ", ".join(arg_decl_list) - if not arg_decl_code and not pyrex: - arg_decl_code = "void" - trailer = "" - if (pyrex or for_display) and not self.return_type.is_pyobject: - if self.exception_value and self.exception_check: - trailer = " except? %s" % self.exception_value - elif self.exception_value: - trailer = " except %s" % self.exception_value - elif self.exception_check == '+': - trailer = " except +" - elif self.exception_check and for_display: - # not spelled out by default, unless for human eyes - trailer = " except *" - if self.nogil: - trailer += " nogil" - if not with_calling_convention: - cc = '' - else: - cc = self.calling_convention_prefix() - if (not entity_code and cc) or entity_code.startswith("*"): - entity_code = "(%s%s)" % (cc, entity_code) - cc = "" - if self.is_const_method: - trailer += " const" - return self.return_type.declaration_code( - "%s%s(%s)%s" % (cc, entity_code, arg_decl_code, trailer), - for_display, dll_linkage, pyrex) - - def function_header_code(self, func_name, arg_code): - if self.is_const_method: - trailer = " const" - else: - trailer = "" - return "%s%s(%s)%s" % (self.calling_convention_prefix(), - func_name, arg_code, trailer) - - def signature_string(self): - s = self.empty_declaration_code() - return s - - def signature_cast_string(self): - s = self.declaration_code("(*)", with_calling_convention=False) - return '(%s)' % s - - def specialize(self, values): - result = CFuncType(self.return_type.specialize(values), - [arg.specialize(values) for arg in self.args], - has_varargs = self.has_varargs, - exception_value = self.exception_value, - exception_check = self.exception_check, - calling_convention = self.calling_convention, - nogil = self.nogil, - with_gil = self.with_gil, - is_overridable = self.is_overridable, - optional_arg_count = self.optional_arg_count, - is_const_method = self.is_const_method, - is_static_method = self.is_static_method, - templates = self.templates) - - result.from_fused = self.is_fused - return result - - def opt_arg_cname(self, arg_name): - return self.op_arg_struct.base_type.scope.lookup(arg_name).cname - - # Methods that deal with Fused Types - # All but map_with_specific_entries should be called only on functions - # with fused types (and not on their corresponding specific versions). - - def get_all_specialized_permutations(self, fused_types=None): - """ - Permute all the types. For every specific instance of a fused type, we - want all other specific instances of all other fused types. - - It returns an iterable of two-tuples of the cname that should prefix - the cname of the function, and a dict mapping any fused types to their - respective specific types. 
- """ - assert self.is_fused - - if fused_types is None: - fused_types = self.get_fused_types() - - return get_all_specialized_permutations(fused_types) - - def get_all_specialized_function_types(self): - """ - Get all the specific function types of this one. - """ - assert self.is_fused - - if self.entry.fused_cfunction: - return [n.type for n in self.entry.fused_cfunction.nodes] - elif self.cached_specialized_types is not None: - return self.cached_specialized_types - - result = [] - permutations = self.get_all_specialized_permutations() - - new_cfunc_entries = [] - for cname, fused_to_specific in permutations: - new_func_type = self.entry.type.specialize(fused_to_specific) - - if self.optional_arg_count: - # Remember, this method is set by CFuncDeclaratorNode - self.declare_opt_arg_struct(new_func_type, cname) - - new_entry = copy.deepcopy(self.entry) - new_func_type.specialize_entry(new_entry, cname) - - new_entry.type = new_func_type - new_func_type.entry = new_entry - result.append(new_func_type) - - new_cfunc_entries.append(new_entry) - - cfunc_entries = self.entry.scope.cfunc_entries - try: - cindex = cfunc_entries.index(self.entry) - except ValueError: - cfunc_entries.extend(new_cfunc_entries) - else: - cfunc_entries[cindex:cindex+1] = new_cfunc_entries - - self.cached_specialized_types = result - - return result - - def get_fused_types(self, result=None, seen=None, subtypes=None): - """Return fused types in the order they appear as parameter types""" - return super(CFuncType, self).get_fused_types(result, seen, - subtypes=['args']) - - def specialize_entry(self, entry, cname): - assert not self.is_fused - specialize_entry(entry, cname) - - def can_coerce_to_pyobject(self, env): - # duplicating the decisions from create_to_py_utility_code() here avoids writing out unused code - if self.has_varargs or self.optional_arg_count: - return False - if self.to_py_function is not None: - return self.to_py_function - for arg in self.args: - if not arg.type.is_pyobject and not arg.type.can_coerce_to_pyobject(env): - return False - if not self.return_type.is_pyobject and not self.return_type.can_coerce_to_pyobject(env): - return False - return True - - def create_to_py_utility_code(self, env): - # FIXME: it seems we're trying to coerce in more cases than we should - if self.to_py_function is not None: - return self.to_py_function - if not self.can_coerce_to_pyobject(env): - return False - from .UtilityCode import CythonUtilityCode - safe_typename = re.sub('[^a-zA-Z0-9]', '__', self.declaration_code("", pyrex=1)) - to_py_function = "__Pyx_CFunc_%s_to_py" % safe_typename - - for arg in self.args: - if not arg.type.is_pyobject and not arg.type.create_from_py_utility_code(env): - return False - if not self.return_type.is_pyobject and not self.return_type.create_to_py_utility_code(env): - return False - - def declared_type(ctype): - type_displayname = str(ctype.declaration_code("", for_display=True)) - if ctype.is_pyobject: - arg_ctype = type_name = type_displayname - if ctype.is_builtin_type: - arg_ctype = ctype.name - elif not ctype.is_extension_type: - type_name = 'object' - type_displayname = None - else: - type_displayname = repr(type_displayname) - elif ctype is c_bint_type: - type_name = arg_ctype = 'bint' - else: - type_name = arg_ctype = type_displayname - if ctype is c_double_type: - type_displayname = 'float' - else: - type_displayname = repr(type_displayname) - return type_name, arg_ctype, type_displayname - - class Arg(object): - def __init__(self, arg_name, arg_type): - self.name = 
arg_name - self.type = arg_type - self.type_cname, self.ctype, self.type_displayname = declared_type(arg_type) - - if self.return_type.is_void: - except_clause = 'except *' - elif self.return_type.is_pyobject: - except_clause = '' - elif self.exception_value: - except_clause = ('except? %s' if self.exception_check else 'except %s') % self.exception_value - else: - except_clause = 'except *' - - context = { - 'cname': to_py_function, - 'args': [Arg(arg.name or 'arg%s' % ix, arg.type) for ix, arg in enumerate(self.args)], - 'return_type': Arg('return', self.return_type), - 'except_clause': except_clause, - } - # FIXME: directives come from first defining environment and do not adapt for reuse - env.use_utility_code(CythonUtilityCode.load( - "cfunc.to_py", "CConvert.pyx", - outer_module_scope=env.global_scope(), # need access to types declared in module - context=context, compiler_directives=dict(env.global_scope().directives))) - self.to_py_function = to_py_function - return True - - -def specialize_entry(entry, cname): - """ - Specialize an entry of a copied fused function or method - """ - entry.is_fused_specialized = True - entry.name = get_fused_cname(cname, entry.name) - - if entry.is_cmethod: - entry.cname = entry.name - if entry.is_inherited: - entry.cname = StringEncoding.EncodedString( - "%s.%s" % (Naming.obj_base_cname, entry.cname)) - else: - entry.cname = get_fused_cname(cname, entry.cname) - - if entry.func_cname: - entry.func_cname = get_fused_cname(cname, entry.func_cname) - -def get_fused_cname(fused_cname, orig_cname): - """ - Given the fused cname id and an original cname, return a specialized cname - """ - assert fused_cname and orig_cname - return StringEncoding.EncodedString('%s%s%s' % (Naming.fused_func_prefix, - fused_cname, orig_cname)) - -def unique(somelist): - seen = set() - result = [] - for obj in somelist: - if obj not in seen: - result.append(obj) - seen.add(obj) - - return result - -def get_all_specialized_permutations(fused_types): - return _get_all_specialized_permutations(unique(fused_types)) - -def _get_all_specialized_permutations(fused_types, id="", f2s=()): - fused_type, = fused_types[0].get_fused_types() - result = [] - - for newid, specific_type in enumerate(fused_type.types): - # f2s = dict(f2s, **{ fused_type: specific_type }) - f2s = dict(f2s) - f2s.update({ fused_type: specific_type }) - - if id: - cname = '%s_%s' % (id, newid) - else: - cname = str(newid) - - if len(fused_types) > 1: - result.extend(_get_all_specialized_permutations( - fused_types[1:], cname, f2s)) - else: - result.append((cname, f2s)) - - return result - -def specialization_signature_string(fused_compound_type, fused_to_specific): - """ - Return the signature for a specialization of a fused type. e.g. - - floating[:] -> - 'float' or 'double' - - cdef fused ft: - float[:] - double[:] - - ft -> - 'float[:]' or 'double[:]' - - integral func(floating) -> - 'int (*func)(float)' or ... - """ - fused_types = fused_compound_type.get_fused_types() - if len(fused_types) == 1: - fused_type = fused_types[0] - else: - fused_type = fused_compound_type - - return fused_type.specialize(fused_to_specific).typeof_name() - - -def get_specialized_types(type): - """ - Return a list of specialized types in their declared order. 
- """ - assert type.is_fused - - if isinstance(type, FusedType): - result = list(type.types) - for specialized_type in result: - specialized_type.specialization_string = specialized_type.typeof_name() - else: - result = [] - for cname, f2s in get_all_specialized_permutations(type.get_fused_types()): - specialized_type = type.specialize(f2s) - specialized_type.specialization_string = ( - specialization_signature_string(type, f2s)) - result.append(specialized_type) - - return result - - -class CFuncTypeArg(BaseType): - # name string - # cname string - # type PyrexType - # pos source file position - - # FIXME: is this the right setup? should None be allowed here? - not_none = False - or_none = False - accept_none = True - accept_builtin_subtypes = False - annotation = None - - subtypes = ['type'] - - def __init__(self, name, type, pos, cname=None, annotation=None): - self.name = name - if cname is not None: - self.cname = cname - else: - self.cname = Naming.var_prefix + name - if annotation is not None: - self.annotation = annotation - self.type = type - self.pos = pos - self.needs_type_test = False # TODO: should these defaults be set in analyse_types()? - - def __repr__(self): - return "%s:%s" % (self.name, repr(self.type)) - - def declaration_code(self, for_display = 0): - return self.type.declaration_code(self.cname, for_display) - - def specialize(self, values): - return CFuncTypeArg(self.name, self.type.specialize(values), self.pos, self.cname) - - -class ToPyStructUtilityCode(object): - - requires = None - - def __init__(self, type, forward_decl, env): - self.type = type - self.header = "static PyObject* %s(%s)" % (type.to_py_function, - type.declaration_code('s')) - self.forward_decl = forward_decl - self.env = env - - def __eq__(self, other): - return isinstance(other, ToPyStructUtilityCode) and self.header == other.header - - def __hash__(self): - return hash(self.header) - - def get_tree(self, **kwargs): - pass - - def put_code(self, output): - code = output['utility_code_def'] - proto = output['utility_code_proto'] - - code.putln("%s {" % self.header) - code.putln("PyObject* res;") - code.putln("PyObject* member;") - code.putln("res = __Pyx_PyDict_NewPresized(%d); if (unlikely(!res)) return NULL;" % - len(self.type.scope.var_entries)) - for member in self.type.scope.var_entries: - nameconst_cname = code.get_py_string_const(member.name, identifier=True) - code.putln("%s; if (unlikely(!member)) goto bad;" % ( - member.type.to_py_call_code('s.%s' % member.cname, 'member', member.type))) - code.putln("if (unlikely(PyDict_SetItem(res, %s, member) < 0)) goto bad;" % nameconst_cname) - code.putln("Py_DECREF(member);") - code.putln("return res;") - code.putln("bad:") - code.putln("Py_XDECREF(member);") - code.putln("Py_DECREF(res);") - code.putln("return NULL;") - code.putln("}") - - # This is a bit of a hack, we need a forward declaration - # due to the way things are ordered in the module... 
- if self.forward_decl: - proto.putln(self.type.empty_declaration_code() + ';') - proto.putln(self.header + ";") - - def inject_tree_and_scope_into(self, module_node): - pass - - -class CStructOrUnionType(CType): - # name string - # cname string - # kind string "struct" or "union" - # scope StructOrUnionScope, or None if incomplete - # typedef_flag boolean - # packed boolean - - # entry Entry - - is_struct_or_union = 1 - has_attributes = 1 - exception_check = True - - def __init__(self, name, kind, scope, typedef_flag, cname, packed=False): - self.name = name - self.cname = cname - self.kind = kind - self.scope = scope - self.typedef_flag = typedef_flag - self.is_struct = kind == 'struct' - self.to_py_function = "%s_to_py_%s" % ( - Naming.convert_func_prefix, self.specialization_name()) - self.from_py_function = "%s_from_py_%s" % ( - Naming.convert_func_prefix, self.specialization_name()) - self.exception_check = True - self._convert_to_py_code = None - self._convert_from_py_code = None - self.packed = packed - - def can_coerce_to_pyobject(self, env): - if self._convert_to_py_code is False: - return None # tri-state-ish - - if env.outer_scope is None: - return False - - if self._convert_to_py_code is None: - is_union = not self.is_struct - unsafe_union_types = set() - safe_union_types = set() - for member in self.scope.var_entries: - member_type = member.type - if not member_type.can_coerce_to_pyobject(env): - self.to_py_function = None - self._convert_to_py_code = False - return False - if is_union: - if member_type.is_ptr or member_type.is_cpp_class: - unsafe_union_types.add(member_type) - else: - safe_union_types.add(member_type) - - if unsafe_union_types and (safe_union_types or len(unsafe_union_types) > 1): - # unsafe mix of safe and unsafe to convert types - self.from_py_function = None - self._convert_from_py_code = False - return False - - return True - - def create_to_py_utility_code(self, env): - if not self.can_coerce_to_pyobject(env): - return False - - if self._convert_to_py_code is None: - for member in self.scope.var_entries: - member.type.create_to_py_utility_code(env) - forward_decl = self.entry.visibility != 'extern' and not self.typedef_flag - self._convert_to_py_code = ToPyStructUtilityCode(self, forward_decl, env) - - env.use_utility_code(self._convert_to_py_code) - return True - - def can_coerce_from_pyobject(self, env): - if env.outer_scope is None or self._convert_from_py_code is False: - return False - for member in self.scope.var_entries: - if not member.type.can_coerce_from_pyobject(env): - return False - return True - - def create_from_py_utility_code(self, env): - if env.outer_scope is None: - return False - - if self._convert_from_py_code is False: - return None # tri-state-ish - - if self._convert_from_py_code is None: - if not self.scope.var_entries: - # There are obviously missing fields; don't allow instantiation - # where absolutely no content is provided. 
- return False - - for member in self.scope.var_entries: - if not member.type.create_from_py_utility_code(env): - self.from_py_function = None - self._convert_from_py_code = False - return False - - context = dict( - struct_type=self, - var_entries=self.scope.var_entries, - funcname=self.from_py_function, - ) - from .UtilityCode import CythonUtilityCode - self._convert_from_py_code = CythonUtilityCode.load( - "FromPyStructUtility" if self.is_struct else "FromPyUnionUtility", - "CConvert.pyx", - outer_module_scope=env.global_scope(), # need access to types declared in module - context=context) - - env.use_utility_code(self._convert_from_py_code) - return True - - def __repr__(self): - return "<CStructOrUnionType %s %s%s>" % ( - self.name, self.cname, - ("", " typedef")[self.typedef_flag]) - - def declaration_code(self, entity_code, - for_display=0, dll_linkage=None, pyrex=0): - if pyrex or for_display: - base_code = self.name - else: - if self.typedef_flag: - base_code = self.cname - else: - base_code = "%s %s" % (self.kind, self.cname) - base_code = public_decl(base_code, dll_linkage) - return self.base_declaration_code(base_code, entity_code) - - def __eq__(self, other): - try: - return (isinstance(other, CStructOrUnionType) and - self.name == other.name) - except AttributeError: - return False - - def __lt__(self, other): - try: - return self.name < other.name - except AttributeError: - # this is arbitrary, but it makes sure we always have - # *some* kind of order - return False - - def __hash__(self): - return hash(self.cname) ^ hash(self.kind) - - def is_complete(self): - return self.scope is not None - - def attributes_known(self): - return self.is_complete() - - def can_be_complex(self): - # Does the struct consist of exactly two identical floats? - fields = self.scope.var_entries - if len(fields) != 2: return False - a, b = fields - return (a.type.is_float and b.type.is_float and - a.type.empty_declaration_code() == - b.type.empty_declaration_code()) - - def struct_nesting_depth(self): - child_depths = [x.type.struct_nesting_depth() - for x in self.scope.var_entries] - return max(child_depths) + 1 - - def cast_code(self, expr_code): - if self.is_struct: - return expr_code - return super(CStructOrUnionType, self).cast_code(expr_code) - -cpp_string_conversions = ("std::string",) - -builtin_cpp_conversions = { - # type element template params - "std::pair": 2, - "std::vector": 1, - "std::list": 1, - "std::set": 1, - "std::unordered_set": 1, - "std::map": 2, - "std::unordered_map": 2, - "std::complex": 1, -} - -class CppClassType(CType): - # name string - # cname string - # scope CppClassScope - # templates [string] or None - - is_cpp_class = 1 - has_attributes = 1 - exception_check = True - namespace = None - - # For struct-like declaration. 
- kind = "struct" - packed = False - typedef_flag = False - - subtypes = ['templates'] - - def __init__(self, name, scope, cname, base_classes, templates=None, template_type=None): - self.name = name - self.cname = cname - self.scope = scope - self.base_classes = base_classes - self.operators = [] - self.templates = templates - self.template_type = template_type - self.num_optional_templates = sum(is_optional_template_param(T) for T in templates or ()) - if templates: - self.specializations = {tuple(zip(templates, templates)): self} - else: - self.specializations = {} - self.is_cpp_string = cname in cpp_string_conversions - - def use_conversion_utility(self, from_or_to): - pass - - def maybe_unordered(self): - if 'unordered' in self.cname: - return 'unordered_' - else: - return '' - - def can_coerce_from_pyobject(self, env): - if self.cname in builtin_cpp_conversions: - template_count = builtin_cpp_conversions[self.cname] - for ix, T in enumerate(self.templates or []): - if ix >= template_count: - break - if T.is_pyobject or not T.can_coerce_from_pyobject(env): - return False - return True - elif self.cname in cpp_string_conversions: - return True - return False - - def create_from_py_utility_code(self, env): - if self.from_py_function is not None: - return True - if self.cname in builtin_cpp_conversions or self.cname in cpp_string_conversions: - X = "XYZABC" - tags = [] - context = {} - for ix, T in enumerate(self.templates or []): - if ix >= builtin_cpp_conversions[self.cname]: - break - if T.is_pyobject or not T.create_from_py_utility_code(env): - return False - tags.append(T.specialization_name()) - context[X[ix]] = T - - if self.cname in cpp_string_conversions: - cls = 'string' - tags = type_identifier(self), - else: - cls = self.cname[5:] - cname = '__pyx_convert_%s_from_py_%s' % (cls, '__and_'.join(tags)) - context.update({ - 'cname': cname, - 'maybe_unordered': self.maybe_unordered(), - 'type': self.cname, - }) - from .UtilityCode import CythonUtilityCode - env.use_utility_code(CythonUtilityCode.load( - cls.replace('unordered_', '') + ".from_py", "CppConvert.pyx", - context=context, compiler_directives=env.directives)) - self.from_py_function = cname - return True - - def can_coerce_to_pyobject(self, env): - if self.cname in builtin_cpp_conversions or self.cname in cpp_string_conversions: - for ix, T in enumerate(self.templates or []): - if ix >= builtin_cpp_conversions[self.cname]: - break - if T.is_pyobject or not T.can_coerce_to_pyobject(env): - return False - return True - - - def create_to_py_utility_code(self, env): - if self.to_py_function is not None: - return True - if self.cname in builtin_cpp_conversions or self.cname in cpp_string_conversions: - X = "XYZABC" - tags = [] - context = {} - for ix, T in enumerate(self.templates or []): - if ix >= builtin_cpp_conversions[self.cname]: - break - if not T.create_to_py_utility_code(env): - return False - tags.append(T.specialization_name()) - context[X[ix]] = T - - if self.cname in cpp_string_conversions: - cls = 'string' - prefix = 'PyObject_' # gets specialised by explicit type casts in CoerceToPyTypeNode - tags = type_identifier(self), - else: - cls = self.cname[5:] - prefix = '' - cname = "__pyx_convert_%s%s_to_py_%s" % (prefix, cls, "____".join(tags)) - context.update({ - 'cname': cname, - 'maybe_unordered': self.maybe_unordered(), - 'type': self.cname, - }) - from .UtilityCode import CythonUtilityCode - env.use_utility_code(CythonUtilityCode.load( - cls.replace('unordered_', '') + ".to_py", "CppConvert.pyx", - 
context=context, compiler_directives=env.directives)) - self.to_py_function = cname - return True - - def is_template_type(self): - return self.templates is not None and self.template_type is None - - def get_fused_types(self, result=None, seen=None): - if result is None: - result = [] - seen = set() - if self.namespace: - self.namespace.get_fused_types(result, seen) - if self.templates: - for T in self.templates: - T.get_fused_types(result, seen) - return result - - def specialize_here(self, pos, template_values=None): - if not self.is_template_type(): - error(pos, "'%s' type is not a template" % self) - return error_type - if len(self.templates) - self.num_optional_templates <= len(template_values) < len(self.templates): - num_defaults = len(self.templates) - len(template_values) - partial_specialization = self.declaration_code('', template_params=template_values) - # Most of the time we don't need to declare anything typed to these - # default template arguments, but when we do there's no way in C++ - # to reference this directly. However, it is common convention to - # provide a typedef in the template class that resolves to each - # template type. For now, allow the user to specify this name as - # the template parameter. - # TODO: Allow typedefs in cpp classes and search for it in this - # classes scope as a concrete name we could use. - template_values = template_values + [ - TemplatePlaceholderType( - "%s::%s" % (partial_specialization, param.name), True) - for param in self.templates[-num_defaults:]] - if len(self.templates) != len(template_values): - error(pos, "%s templated type receives %d arguments, got %d" % - (self.name, len(self.templates), len(template_values))) - return error_type - has_object_template_param = False - for value in template_values: - if value.is_pyobject: - has_object_template_param = True - error(pos, - "Python object type '%s' cannot be used as a template argument" % value) - if has_object_template_param: - return error_type - return self.specialize(dict(zip(self.templates, template_values))) - - def specialize(self, values): - if not self.templates and not self.namespace: - return self - if self.templates is None: - self.templates = [] - key = tuple(values.items()) - if key in self.specializations: - return self.specializations[key] - template_values = [t.specialize(values) for t in self.templates] - specialized = self.specializations[key] = \ - CppClassType(self.name, None, self.cname, [], template_values, template_type=self) - # Need to do these *after* self.specializations[key] is set - # to avoid infinite recursion on circular references. - specialized.base_classes = [b.specialize(values) for b in self.base_classes] - if self.namespace is not None: - specialized.namespace = self.namespace.specialize(values) - specialized.scope = self.scope.specialize(values, specialized) - if self.cname == 'std::vector': - # vector<bool> is special cased in the C++ standard, and its - # accessors do not necessarily return references to the underlying - # elements (which may be bit-packed). - # http://www.cplusplus.com/reference/vector/vector-bool/ - # Here we pretend that the various methods return bool values - # (as the actual returned values are coercable to such, and - # we don't support call expressions as lvalues). 
- T = values.get(self.templates[0], None) - if T and not T.is_fused and T.empty_declaration_code() == 'bool': - for bit_ref_returner in ('at', 'back', 'front'): - if bit_ref_returner in specialized.scope.entries: - specialized.scope.entries[bit_ref_returner].type.return_type = T - return specialized - - def deduce_template_params(self, actual): - if actual.is_const: - actual = actual.const_base_type - if actual.is_reference: - actual = actual.ref_base_type - if self == actual: - return {} - elif actual.is_cpp_class: - self_template_type = self - while getattr(self_template_type, 'template_type', None): - self_template_type = self_template_type.template_type - def all_bases(cls): - yield cls - for parent in cls.base_classes: - for base in all_bases(parent): - yield base - for actual_base in all_bases(actual): - template_type = actual_base - while getattr(template_type, 'template_type', None): - template_type = template_type.template_type - if (self_template_type.empty_declaration_code() - == template_type.empty_declaration_code()): - return reduce( - merge_template_deductions, - [formal_param.deduce_template_params(actual_param) - for (formal_param, actual_param) - in zip(self.templates, actual_base.templates)], - {}) - else: - return {} - - def declaration_code(self, entity_code, - for_display = 0, dll_linkage = None, pyrex = 0, - template_params = None): - if template_params is None: - template_params = self.templates - if self.templates: - template_strings = [param.declaration_code('', for_display, None, pyrex) - for param in template_params - if not is_optional_template_param(param) and not param.is_fused] - if for_display: - brackets = "[%s]" - else: - brackets = "<%s> " - templates = brackets % ",".join(template_strings) - else: - templates = "" - if pyrex or for_display: - base_code = "%s%s" % (self.name, templates) - else: - base_code = "%s%s" % (self.cname, templates) - if self.namespace is not None: - base_code = "%s::%s" % (self.namespace.empty_declaration_code(), base_code) - base_code = public_decl(base_code, dll_linkage) - return self.base_declaration_code(base_code, entity_code) - - def is_subclass(self, other_type): - if self.same_as_resolved_type(other_type): - return 1 - for base_class in self.base_classes: - if base_class.is_subclass(other_type): - return 1 - return 0 - - def subclass_dist(self, super_type): - if self.same_as_resolved_type(super_type): - return 0 - elif not self.base_classes: - return float('inf') - else: - return 1 + min(b.subclass_dist(super_type) for b in self.base_classes) - - def same_as_resolved_type(self, other_type): - if other_type.is_cpp_class: - if self == other_type: - return 1 - # This messy logic is needed due to GH Issue #1852. - elif (self.cname == other_type.cname and - (self.template_type and other_type.template_type - or self.templates - or other_type.templates)): - if self.templates == other_type.templates: - return 1 - for t1, t2 in zip(self.templates, other_type.templates): - if is_optional_template_param(t1) and is_optional_template_param(t2): - break - if not t1.same_as_resolved_type(t2): - return 0 - return 1 - return 0 - - def assignable_from_resolved_type(self, other_type): - # TODO: handle operator=(...) here? 
- if other_type is error_type: - return True - elif other_type.is_cpp_class: - return other_type.is_subclass(self) - elif other_type.is_string and self.cname in cpp_string_conversions: - return True - - def attributes_known(self): - return self.scope is not None - - def find_cpp_operation_type(self, operator, operand_type=None): - operands = [self] - if operand_type is not None: - operands.append(operand_type) - # pos == None => no errors - operator_entry = self.scope.lookup_operator_for_types(None, operator, operands) - if not operator_entry: - return None - func_type = operator_entry.type - if func_type.is_ptr: - func_type = func_type.base_type - return func_type.return_type - - def get_constructor(self, pos): - constructor = self.scope.lookup('<init>') - if constructor is not None: - return constructor - - # Otherwise: automatically declare no-args default constructor. - # Make it "nogil" if the base classes allow it. - nogil = True - for base in self.base_classes: - base_constructor = base.scope.lookup('<init>') - if base_constructor and not base_constructor.type.nogil: - nogil = False - break - - func_type = CFuncType(self, [], exception_check='+', nogil=nogil) - return self.scope.declare_cfunction(u'<init>', func_type, pos) - - def check_nullary_constructor(self, pos, msg="stack allocated"): - constructor = self.scope.lookup(u'<init>') - if constructor is not None and best_match([], constructor.all_alternatives()) is None: - error(pos, "C++ class must have a nullary constructor to be %s" % msg) - - -class TemplatePlaceholderType(CType): - - def __init__(self, name, optional=False): - self.name = name - self.optional = optional - - def declaration_code(self, entity_code, - for_display = 0, dll_linkage = None, pyrex = 0): - if entity_code: - return self.name + " " + entity_code - else: - return self.name - - def specialize(self, values): - if self in values: - return values[self] - else: - return self - - def deduce_template_params(self, actual): - return {self: actual} - - def same_as_resolved_type(self, other_type): - if isinstance(other_type, TemplatePlaceholderType): - return self.name == other_type.name - else: - return 0 - - def __hash__(self): - return hash(self.name) - - def __cmp__(self, other): - if isinstance(other, TemplatePlaceholderType): - return cmp(self.name, other.name) - else: - return cmp(type(self), type(other)) - - def __eq__(self, other): - if isinstance(other, TemplatePlaceholderType): - return self.name == other.name - else: - return False - -def is_optional_template_param(type): - return isinstance(type, TemplatePlaceholderType) and type.optional - - -class CEnumType(CIntLike, CType): - # name string - # cname string or None - # typedef_flag boolean - # values [string], populated during declaration analysis - - is_enum = 1 - signed = 1 - rank = -1 # Ranks below any integer type - - def __init__(self, name, cname, typedef_flag, namespace=None): - self.name = name - self.cname = cname - self.values = [] - self.typedef_flag = typedef_flag - self.namespace = namespace - self.default_value = "(%s) 0" % self.empty_declaration_code() - - def __str__(self): - return self.name - - def __repr__(self): - return "<CEnumType %s %s%s>" % (self.name, self.cname, - ("", " typedef")[self.typedef_flag]) - - def declaration_code(self, entity_code, - for_display = 0, dll_linkage = None, pyrex = 0): - if pyrex or for_display: - base_code = self.name - else: - if self.namespace: - base_code = "%s::%s" % ( - self.namespace.empty_declaration_code(), self.cname) - elif 
self.typedef_flag: - base_code = self.cname - else: - base_code = "enum %s" % self.cname - base_code = public_decl(base_code, dll_linkage) - return self.base_declaration_code(base_code, entity_code) - - def specialize(self, values): - if self.namespace: - namespace = self.namespace.specialize(values) - if namespace != self.namespace: - return CEnumType( - self.name, self.cname, self.typedef_flag, namespace) - return self - - def create_type_wrapper(self, env): - from .UtilityCode import CythonUtilityCode - env.use_utility_code(CythonUtilityCode.load( - "EnumType", "CpdefEnums.pyx", - context={"name": self.name, - "items": tuple(self.values)}, - outer_module_scope=env.global_scope())) - - -class CTupleType(CType): - # components [PyrexType] - - is_ctuple = True - - def __init__(self, cname, components): - self.cname = cname - self.components = components - self.size = len(components) - self.to_py_function = "%s_to_py_%s" % (Naming.convert_func_prefix, self.cname) - self.from_py_function = "%s_from_py_%s" % (Naming.convert_func_prefix, self.cname) - self.exception_check = True - self._convert_to_py_code = None - self._convert_from_py_code = None - - def __str__(self): - return "(%s)" % ", ".join(str(c) for c in self.components) - - def declaration_code(self, entity_code, - for_display = 0, dll_linkage = None, pyrex = 0): - if pyrex or for_display: - return str(self) - else: - return self.base_declaration_code(self.cname, entity_code) - - def can_coerce_to_pyobject(self, env): - for component in self.components: - if not component.can_coerce_to_pyobject(env): - return False - return True - - def can_coerce_from_pyobject(self, env): - for component in self.components: - if not component.can_coerce_from_pyobject(env): - return False - return True - - def create_to_py_utility_code(self, env): - if self._convert_to_py_code is False: - return None # tri-state-ish - - if self._convert_to_py_code is None: - for component in self.components: - if not component.create_to_py_utility_code(env): - self.to_py_function = None - self._convert_to_py_code = False - return False - - context = dict( - struct_type_decl=self.empty_declaration_code(), - components=self.components, - funcname=self.to_py_function, - size=len(self.components) - ) - self._convert_to_py_code = TempitaUtilityCode.load( - "ToPyCTupleUtility", "TypeConversion.c", context=context) - - env.use_utility_code(self._convert_to_py_code) - return True - - def create_from_py_utility_code(self, env): - if self._convert_from_py_code is False: - return None # tri-state-ish - - if self._convert_from_py_code is None: - for component in self.components: - if not component.create_from_py_utility_code(env): - self.from_py_function = None - self._convert_from_py_code = False - return False - - context = dict( - struct_type_decl=self.empty_declaration_code(), - components=self.components, - funcname=self.from_py_function, - size=len(self.components) - ) - self._convert_from_py_code = TempitaUtilityCode.load( - "FromPyCTupleUtility", "TypeConversion.c", context=context) - - env.use_utility_code(self._convert_from_py_code) - return True - - def cast_code(self, expr_code): - return expr_code - - -def c_tuple_type(components): - components = tuple(components) - cname = Naming.ctuple_type_prefix + type_list_identifier(components) - tuple_type = CTupleType(cname, components) - return tuple_type - - -class UnspecifiedType(PyrexType): - # Used as a placeholder until the type can be determined. 
- - is_unspecified = 1 - - def declaration_code(self, entity_code, - for_display = 0, dll_linkage = None, pyrex = 0): - return "<unspecified>" - - def same_as_resolved_type(self, other_type): - return False - - -class ErrorType(PyrexType): - # Used to prevent propagation of error messages. - - is_error = 1 - exception_value = "0" - exception_check = 0 - to_py_function = "dummy" - from_py_function = "dummy" - - def create_to_py_utility_code(self, env): - return True - - def create_from_py_utility_code(self, env): - return True - - def declaration_code(self, entity_code, - for_display = 0, dll_linkage = None, pyrex = 0): - return "<error>" - - def same_as_resolved_type(self, other_type): - return 1 - - def error_condition(self, result_code): - return "dummy" - - -rank_to_type_name = ( - "char", # 0 - "short", # 1 - "int", # 2 - "long", # 3 - "PY_LONG_LONG", # 4 - "float", # 5 - "double", # 6 - "long double", # 7 -) - -_rank_to_type_name = list(rank_to_type_name) -RANK_INT = _rank_to_type_name.index('int') -RANK_LONG = _rank_to_type_name.index('long') -RANK_FLOAT = _rank_to_type_name.index('float') -UNSIGNED = 0 -SIGNED = 2 - -error_type = ErrorType() -unspecified_type = UnspecifiedType() - -py_object_type = PyObjectType() - -c_void_type = CVoidType() - -c_uchar_type = CIntType(0, UNSIGNED) -c_ushort_type = CIntType(1, UNSIGNED) -c_uint_type = CIntType(2, UNSIGNED) -c_ulong_type = CIntType(3, UNSIGNED) -c_ulonglong_type = CIntType(4, UNSIGNED) - -c_char_type = CIntType(0) -c_short_type = CIntType(1) -c_int_type = CIntType(2) -c_long_type = CIntType(3) -c_longlong_type = CIntType(4) - -c_schar_type = CIntType(0, SIGNED) -c_sshort_type = CIntType(1, SIGNED) -c_sint_type = CIntType(2, SIGNED) -c_slong_type = CIntType(3, SIGNED) -c_slonglong_type = CIntType(4, SIGNED) - -c_float_type = CFloatType(5, math_h_modifier='f') -c_double_type = CFloatType(6) -c_longdouble_type = CFloatType(7, math_h_modifier='l') - -c_float_complex_type = CComplexType(c_float_type) -c_double_complex_type = CComplexType(c_double_type) -c_longdouble_complex_type = CComplexType(c_longdouble_type) - -c_anon_enum_type = CAnonEnumType(-1) -c_returncode_type = CReturnCodeType(RANK_INT) -c_bint_type = CBIntType(RANK_INT) -c_py_unicode_type = CPyUnicodeIntType(RANK_INT-0.5, UNSIGNED) -c_py_ucs4_type = CPyUCS4IntType(RANK_LONG-0.5, UNSIGNED) -c_py_hash_t_type = CPyHashTType(RANK_LONG+0.5, SIGNED) -c_py_ssize_t_type = CPySSizeTType(RANK_LONG+0.5, SIGNED) -c_ssize_t_type = CSSizeTType(RANK_LONG+0.5, SIGNED) -c_size_t_type = CSizeTType(RANK_LONG+0.5, UNSIGNED) -c_ptrdiff_t_type = CPtrdiffTType(RANK_LONG+0.75, SIGNED) - -c_null_ptr_type = CNullPtrType(c_void_type) -c_void_ptr_type = CPtrType(c_void_type) -c_void_ptr_ptr_type = CPtrType(c_void_ptr_type) -c_char_ptr_type = CPtrType(c_char_type) -c_const_char_ptr_type = CPtrType(CConstType(c_char_type)) -c_uchar_ptr_type = CPtrType(c_uchar_type) -c_const_uchar_ptr_type = CPtrType(CConstType(c_uchar_type)) -c_char_ptr_ptr_type = CPtrType(c_char_ptr_type) -c_int_ptr_type = CPtrType(c_int_type) -c_py_unicode_ptr_type = CPtrType(c_py_unicode_type) -c_const_py_unicode_ptr_type = CPtrType(CConstType(c_py_unicode_type)) -c_py_ssize_t_ptr_type = CPtrType(c_py_ssize_t_type) -c_ssize_t_ptr_type = CPtrType(c_ssize_t_type) -c_size_t_ptr_type = CPtrType(c_size_t_type) - -# GIL state -c_gilstate_type = CEnumType("PyGILState_STATE", "PyGILState_STATE", True) -c_threadstate_type = CStructOrUnionType("PyThreadState", "struct", None, 1, "PyThreadState") -c_threadstate_ptr_type = 
CPtrType(c_threadstate_type) - -# PEP-539 "Py_tss_t" type -c_pytss_t_type = CPyTSSTType() - -# the Py_buffer type is defined in Builtin.py -c_py_buffer_type = CStructOrUnionType("Py_buffer", "struct", None, 1, "Py_buffer") -c_py_buffer_ptr_type = CPtrType(c_py_buffer_type) - -# Not sure whether the unsigned versions and 'long long' should be in there -# long long requires C99 and might be slow, and would always get preferred -# when specialization happens through calling and not indexing -cy_integral_type = FusedType([c_short_type, c_int_type, c_long_type], - name="integral") -# Omitting long double as it might be slow -cy_floating_type = FusedType([c_float_type, c_double_type], name="floating") -cy_numeric_type = FusedType([c_short_type, - c_int_type, - c_long_type, - c_float_type, - c_double_type, - c_float_complex_type, - c_double_complex_type], name="numeric") - -# buffer-related structs -c_buf_diminfo_type = CStructOrUnionType("__Pyx_Buf_DimInfo", "struct", - None, 1, "__Pyx_Buf_DimInfo") -c_pyx_buffer_type = CStructOrUnionType("__Pyx_Buffer", "struct", None, 1, "__Pyx_Buffer") -c_pyx_buffer_ptr_type = CPtrType(c_pyx_buffer_type) -c_pyx_buffer_nd_type = CStructOrUnionType("__Pyx_LocalBuf_ND", "struct", - None, 1, "__Pyx_LocalBuf_ND") - -cython_memoryview_type = CStructOrUnionType("__pyx_memoryview_obj", "struct", - None, 0, "__pyx_memoryview_obj") - -memoryviewslice_type = CStructOrUnionType("memoryviewslice", "struct", - None, 1, "__Pyx_memviewslice") - -modifiers_and_name_to_type = { - #(signed, longness, name) : type - (0, 0, "char"): c_uchar_type, - (1, 0, "char"): c_char_type, - (2, 0, "char"): c_schar_type, - - (0, -1, "int"): c_ushort_type, - (0, 0, "int"): c_uint_type, - (0, 1, "int"): c_ulong_type, - (0, 2, "int"): c_ulonglong_type, - - (1, -1, "int"): c_short_type, - (1, 0, "int"): c_int_type, - (1, 1, "int"): c_long_type, - (1, 2, "int"): c_longlong_type, - - (2, -1, "int"): c_sshort_type, - (2, 0, "int"): c_sint_type, - (2, 1, "int"): c_slong_type, - (2, 2, "int"): c_slonglong_type, - - (1, 0, "float"): c_float_type, - (1, 0, "double"): c_double_type, - (1, 1, "double"): c_longdouble_type, - - (1, 0, "complex"): c_double_complex_type, # C: float, Python: double => Python wins - (1, 0, "floatcomplex"): c_float_complex_type, - (1, 0, "doublecomplex"): c_double_complex_type, - (1, 1, "doublecomplex"): c_longdouble_complex_type, - - # - (1, 0, "void"): c_void_type, - (1, 0, "Py_tss_t"): c_pytss_t_type, - - (1, 0, "bint"): c_bint_type, - (0, 0, "Py_UNICODE"): c_py_unicode_type, - (0, 0, "Py_UCS4"): c_py_ucs4_type, - (2, 0, "Py_hash_t"): c_py_hash_t_type, - (2, 0, "Py_ssize_t"): c_py_ssize_t_type, - (2, 0, "ssize_t") : c_ssize_t_type, - (0, 0, "size_t") : c_size_t_type, - (2, 0, "ptrdiff_t") : c_ptrdiff_t_type, - - (1, 0, "object"): py_object_type, -} - -def is_promotion(src_type, dst_type): - # It's hard to find a hard definition of promotion, but empirical - # evidence suggests that the below is all that's allowed. - if src_type.is_numeric: - if dst_type.same_as(c_int_type): - unsigned = (not src_type.signed) - return (src_type.is_enum or - (src_type.is_int and - unsigned + src_type.rank < dst_type.rank)) - elif dst_type.same_as(c_double_type): - return src_type.is_float and src_type.rank <= dst_type.rank - return False - -def best_match(arg_types, functions, pos=None, env=None, args=None): - """ - Given a list args of arguments and a list of functions, choose one - to call which seems to be the "best" fit for this list of arguments. 
- This function is used, e.g., when deciding which overloaded method - to dispatch for C++ classes. - - We first eliminate functions based on arity, and if only one - function has the correct arity, we return it. Otherwise, we weight - functions based on how much work must be done to convert the - arguments, with the following priorities: - * identical types or pointers to identical types - * promotions - * non-Python types - That is, we prefer functions where no arguments need converted, - and failing that, functions where only promotions are required, and - so on. - - If no function is deemed a good fit, or if two or more functions have - the same weight, we return None (as there is no best match). If pos - is not None, we also generate an error. - """ - # TODO: args should be a list of types, not a list of Nodes. - actual_nargs = len(arg_types) - - candidates = [] - errors = [] - for func in functions: - error_mesg = "" - func_type = func.type - if func_type.is_ptr: - func_type = func_type.base_type - # Check function type - if not func_type.is_cfunction: - if not func_type.is_error and pos is not None: - error_mesg = "Calling non-function type '%s'" % func_type - errors.append((func, error_mesg)) - continue - # Check no. of args - max_nargs = len(func_type.args) - min_nargs = max_nargs - func_type.optional_arg_count - if actual_nargs < min_nargs or \ - (not func_type.has_varargs and actual_nargs > max_nargs): - if max_nargs == min_nargs and not func_type.has_varargs: - expectation = max_nargs - elif actual_nargs < min_nargs: - expectation = "at least %s" % min_nargs - else: - expectation = "at most %s" % max_nargs - error_mesg = "Call with wrong number of arguments (expected %s, got %s)" \ - % (expectation, actual_nargs) - errors.append((func, error_mesg)) - continue - if func_type.templates: - deductions = reduce( - merge_template_deductions, - [pattern.type.deduce_template_params(actual) for (pattern, actual) in zip(func_type.args, arg_types)], - {}) - if deductions is None: - errors.append((func, "Unable to deduce type parameters for %s given (%s)" % (func_type, ', '.join(map(str, arg_types))))) - elif len(deductions) < len(func_type.templates): - errors.append((func, "Unable to deduce type parameter %s" % ( - ", ".join([param.name for param in set(func_type.templates) - set(deductions.keys())])))) - else: - type_list = [deductions[param] for param in func_type.templates] - from .Symtab import Entry - specialization = Entry( - name = func.name + "[%s]" % ",".join([str(t) for t in type_list]), - cname = func.cname + "<%s>" % ",".join([t.empty_declaration_code() for t in type_list]), - type = func_type.specialize(deductions), - pos = func.pos) - candidates.append((specialization, specialization.type)) - else: - candidates.append((func, func_type)) - - # Optimize the most common case of no overloading... - if len(candidates) == 1: - return candidates[0][0] - elif len(candidates) == 0: - if pos is not None: - func, errmsg = errors[0] - if len(errors) == 1 or [1 for func, e in errors if e == errmsg]: - error(pos, errmsg) - else: - error(pos, "no suitable method found") - return None - - possibilities = [] - bad_types = [] - needed_coercions = {} - - for index, (func, func_type) in enumerate(candidates): - score = [0,0,0,0,0,0,0] - for i in range(min(actual_nargs, len(func_type.args))): - src_type = arg_types[i] - dst_type = func_type.args[i].type - - assignable = dst_type.assignable_from(src_type) - - # Now take care of unprefixed string literals. 
So when you call a cdef - # function that takes a char *, the coercion will mean that the - # type will simply become bytes. We need to do this coercion - # manually for overloaded and fused functions - if not assignable: - c_src_type = None - if src_type.is_pyobject: - if src_type.is_builtin_type and src_type.name == 'str' and dst_type.resolve().is_string: - c_src_type = dst_type.resolve() - else: - c_src_type = src_type.default_coerced_ctype() - elif src_type.is_pythran_expr: - c_src_type = src_type.org_buffer - - if c_src_type is not None: - assignable = dst_type.assignable_from(c_src_type) - if assignable: - src_type = c_src_type - needed_coercions[func] = (i, dst_type) - - if assignable: - if src_type == dst_type or dst_type.same_as(src_type): - pass # score 0 - elif func_type.is_strict_signature: - break # exact match requested but not found - elif is_promotion(src_type, dst_type): - score[2] += 1 - elif ((src_type.is_int and dst_type.is_int) or - (src_type.is_float and dst_type.is_float)): - score[2] += abs(dst_type.rank + (not dst_type.signed) - - (src_type.rank + (not src_type.signed))) + 1 - elif dst_type.is_ptr and src_type.is_ptr: - if dst_type.base_type == c_void_type: - score[4] += 1 - elif src_type.base_type.is_cpp_class and src_type.base_type.is_subclass(dst_type.base_type): - score[6] += src_type.base_type.subclass_dist(dst_type.base_type) - else: - score[5] += 1 - elif not src_type.is_pyobject: - score[1] += 1 - else: - score[0] += 1 - else: - error_mesg = "Invalid conversion from '%s' to '%s'" % (src_type, dst_type) - bad_types.append((func, error_mesg)) - break - else: - possibilities.append((score, index, func)) # so we can sort it - - if possibilities: - possibilities.sort() - if len(possibilities) > 1: - score1 = possibilities[0][0] - score2 = possibilities[1][0] - if score1 == score2: - if pos is not None: - error(pos, "ambiguous overloaded method") - return None - - function = possibilities[0][-1] - - if function in needed_coercions and env: - arg_i, coerce_to_type = needed_coercions[function] - args[arg_i] = args[arg_i].coerce_to(coerce_to_type, env) - - return function - - if pos is not None: - if len(bad_types) == 1: - error(pos, bad_types[0][1]) - else: - error(pos, "no suitable method found") - - return None - -def merge_template_deductions(a, b): - if a is None or b is None: - return None - all = a - for param, value in b.items(): - if param in all: - if a[param] != b[param]: - return None - else: - all[param] = value - return all - - -def widest_numeric_type(type1, type2): - """Given two numeric types, return the narrowest type encompassing both of them. 
- """ - if type1.is_reference: - type1 = type1.ref_base_type - if type2.is_reference: - type2 = type2.ref_base_type - if type1.is_const: - type1 = type1.const_base_type - if type2.is_const: - type2 = type2.const_base_type - if type1 == type2: - widest_type = type1 - elif type1.is_complex or type2.is_complex: - def real_type(ntype): - if ntype.is_complex: - return ntype.real_type - return ntype - widest_type = CComplexType( - widest_numeric_type( - real_type(type1), - real_type(type2))) - elif type1.is_enum and type2.is_enum: - widest_type = c_int_type - elif type1.rank < type2.rank: - widest_type = type2 - elif type1.rank > type2.rank: - widest_type = type1 - elif type1.signed < type2.signed: - widest_type = type1 - elif type1.signed > type2.signed: - widest_type = type2 - elif type1.is_typedef > type2.is_typedef: - widest_type = type1 - else: - widest_type = type2 - return widest_type - - -def numeric_type_fits(small_type, large_type): - return widest_numeric_type(small_type, large_type) == large_type - - -def independent_spanning_type(type1, type2): - # Return a type assignable independently from both type1 and - # type2, but do not require any interoperability between the two. - # For example, in "True * 2", it is safe to assume an integer - # result type (so spanning_type() will do the right thing), - # whereas "x = True or 2" must evaluate to a type that can hold - # both a boolean value and an integer, so this function works - # better. - if type1.is_reference ^ type2.is_reference: - if type1.is_reference: - type1 = type1.ref_base_type - else: - type2 = type2.ref_base_type - if type1 == type2: - return type1 - elif (type1 is c_bint_type or type2 is c_bint_type) and (type1.is_numeric and type2.is_numeric): - # special case: if one of the results is a bint and the other - # is another C integer, we must prevent returning a numeric - # type so that we do not lose the ability to coerce to a - # Python bool if we have to. - return py_object_type - span_type = _spanning_type(type1, type2) - if span_type is None: - return error_type - return span_type - -def spanning_type(type1, type2): - # Return a type assignable from both type1 and type2, or - # py_object_type if no better type is found. Assumes that the - # code that calls this will try a coercion afterwards, which will - # fail if the types cannot actually coerce to a py_object_type. 
- if type1 == type2: - return type1 - elif type1 is py_object_type or type2 is py_object_type: - return py_object_type - elif type1 is c_py_unicode_type or type2 is c_py_unicode_type: - # Py_UNICODE behaves more like a string than an int - return py_object_type - span_type = _spanning_type(type1, type2) - if span_type is None: - return py_object_type - return span_type - -def _spanning_type(type1, type2): - if type1.is_numeric and type2.is_numeric: - return widest_numeric_type(type1, type2) - elif type1.is_builtin_type and type1.name == 'float' and type2.is_numeric: - return widest_numeric_type(c_double_type, type2) - elif type2.is_builtin_type and type2.name == 'float' and type1.is_numeric: - return widest_numeric_type(type1, c_double_type) - elif type1.is_extension_type and type2.is_extension_type: - return widest_extension_type(type1, type2) - elif type1.is_pyobject or type2.is_pyobject: - return py_object_type - elif type1.assignable_from(type2): - if type1.is_extension_type and type1.typeobj_is_imported(): - # external types are unsafe, so we use PyObject instead - return py_object_type - return type1 - elif type2.assignable_from(type1): - if type2.is_extension_type and type2.typeobj_is_imported(): - # external types are unsafe, so we use PyObject instead - return py_object_type - return type2 - elif type1.is_ptr and type2.is_ptr: - if type1.base_type.is_cpp_class and type2.base_type.is_cpp_class: - common_base = widest_cpp_type(type1.base_type, type2.base_type) - if common_base: - return CPtrType(common_base) - # incompatible pointers, void* will do as a result - return c_void_ptr_type - else: - return None - -def widest_extension_type(type1, type2): - if type1.typeobj_is_imported() or type2.typeobj_is_imported(): - return py_object_type - while True: - if type1.subtype_of(type2): - return type2 - elif type2.subtype_of(type1): - return type1 - type1, type2 = type1.base_type, type2.base_type - if type1 is None or type2 is None: - return py_object_type - -def widest_cpp_type(type1, type2): - @cached_function - def bases(type): - all = set() - for base in type.base_classes: - all.add(base) - all.update(bases(base)) - return all - common_bases = bases(type1).intersection(bases(type2)) - common_bases_bases = reduce(set.union, [bases(b) for b in common_bases], set()) - candidates = [b for b in common_bases if b not in common_bases_bases] - if len(candidates) == 1: - return candidates[0] - else: - # Fall back to void* for now. - return None - - -def simple_c_type(signed, longness, name): - # Find type descriptor for simple type given name and modifiers. - # Returns None if arguments don't make sense. 
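#    Examples, per the modifiers_and_name_to_type table above
#    (signed: 0 = unsigned, 1 = default, 2 = explicitly signed; longness: -1 = short, 1 = long, 2 = long long):
#        simple_c_type(1, 0, "int")        -> c_int_type
#        simple_c_type(0, 1, "int")        -> c_ulong_type
#        simple_c_type(1, 1, "double")     -> c_longdouble_type
#        simple_c_type(2, 0, "Py_ssize_t") -> c_py_ssize_t_type
#        simple_c_type(0, 0, "float")      -> None (there is no unsigned float)
#    parse_basic_type() below builds on this, e.g. "p_int" parses to a pointer to int
#    and "ulong" resolves to c_ulong_type after stripping the 'u' and 'long' prefixes.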
- return modifiers_and_name_to_type.get((signed, longness, name)) - -def parse_basic_type(name): - base = None - if name.startswith('p_'): - base = parse_basic_type(name[2:]) - elif name.startswith('p'): - base = parse_basic_type(name[1:]) - elif name.endswith('*'): - base = parse_basic_type(name[:-1]) - if base: - return CPtrType(base) - # - basic_type = simple_c_type(1, 0, name) - if basic_type: - return basic_type - # - signed = 1 - longness = 0 - if name == 'Py_UNICODE': - signed = 0 - elif name == 'Py_UCS4': - signed = 0 - elif name == 'Py_hash_t': - signed = 2 - elif name == 'Py_ssize_t': - signed = 2 - elif name == 'ssize_t': - signed = 2 - elif name == 'size_t': - signed = 0 - else: - if name.startswith('u'): - name = name[1:] - signed = 0 - elif (name.startswith('s') and - not name.startswith('short')): - name = name[1:] - signed = 2 - longness = 0 - while name.startswith('short'): - name = name.replace('short', '', 1).strip() - longness -= 1 - while name.startswith('long'): - name = name.replace('long', '', 1).strip() - longness += 1 - if longness != 0 and not name: - name = 'int' - return simple_c_type(signed, longness, name) - -def c_array_type(base_type, size): - # Construct a C array type. - if base_type is error_type: - return error_type - else: - return CArrayType(base_type, size) - -def c_ptr_type(base_type): - # Construct a C pointer type. - if base_type is error_type: - return error_type - elif base_type.is_reference: - return CPtrType(base_type.ref_base_type) - else: - return CPtrType(base_type) - -def c_ref_type(base_type): - # Construct a C reference type - if base_type is error_type: - return error_type - else: - return CReferenceType(base_type) - -def c_const_type(base_type): - # Construct a C const type. - if base_type is error_type: - return error_type - else: - return CConstType(base_type) - -def same_type(type1, type2): - return type1.same_as(type2) - -def assignable_from(type1, type2): - return type1.assignable_from(type2) - -def typecast(to_type, from_type, expr_code): - # Return expr_code cast to a C type which can be - # assigned to to_type, assuming its existing C type - # is from_type. 
- if (to_type is from_type or - (not to_type.is_pyobject and assignable_from(to_type, from_type))): - return expr_code - elif (to_type is py_object_type and from_type and - from_type.is_builtin_type and from_type.name != 'type'): - # no cast needed, builtins are PyObject* already - return expr_code - else: - #print "typecast: to", to_type, "from", from_type ### - return to_type.cast_code(expr_code) - -def type_list_identifier(types): - return cap_length('__and_'.join(type_identifier(type) for type in types)) - -_type_identifier_cache = {} -def type_identifier(type): - decl = type.empty_declaration_code() - safe = _type_identifier_cache.get(decl) - if safe is None: - safe = decl - safe = re.sub(' +', ' ', safe) - safe = re.sub(' ([^a-zA-Z0-9_])', r'\1', safe) - safe = re.sub('([^a-zA-Z0-9_]) ', r'\1', safe) - safe = (safe.replace('__', '__dunder') - .replace('const ', '__const_') - .replace(' ', '__space_') - .replace('*', '__ptr') - .replace('&', '__ref') - .replace('[', '__lArr') - .replace(']', '__rArr') - .replace('<', '__lAng') - .replace('>', '__rAng') - .replace('(', '__lParen') - .replace(')', '__rParen') - .replace(',', '__comma_') - .replace('::', '__in_')) - safe = cap_length(re.sub('[^a-zA-Z0-9_]', lambda x: '__%X' % ord(x.group(0)), safe)) - _type_identifier_cache[decl] = safe - return safe - -def cap_length(s, max_prefix=63, max_len=1024): - if len(s) <= max_prefix: - return s - hash_prefix = hashlib.sha256(s.encode('ascii')).hexdigest()[:6] - return '%s__%s__etc' % (hash_prefix, s[:max_len-17]) diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Compiler/Scanning.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Compiler/Scanning.py deleted file mode 100644 index f61144033cb74cf577f079ad35417c97970aff19..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Compiler/Scanning.py +++ /dev/null @@ -1,551 +0,0 @@ -# cython: infer_types=True, language_level=3, py2_import=True, auto_pickle=False -# -# Cython Scanner -# - -from __future__ import absolute_import - -import cython -cython.declare(make_lexicon=object, lexicon=object, - print_function=object, error=object, warning=object, - os=object, platform=object) - -import os -import platform - -from .. 
import Utils -from ..Plex.Scanners import Scanner -from ..Plex.Errors import UnrecognizedInput -from .Errors import error, warning -from .Lexicon import any_string_prefix, make_lexicon, IDENT -from .Future import print_function - -debug_scanner = 0 -trace_scanner = 0 -scanner_debug_flags = 0 -scanner_dump_file = None - -lexicon = None - - -def get_lexicon(): - global lexicon - if not lexicon: - lexicon = make_lexicon() - return lexicon - - -#------------------------------------------------------------------ - -py_reserved_words = [ - "global", "nonlocal", "def", "class", "print", "del", "pass", "break", - "continue", "return", "raise", "import", "exec", "try", - "except", "finally", "while", "if", "elif", "else", "for", - "in", "assert", "and", "or", "not", "is", "lambda", - "from", "yield", "with", -] - -pyx_reserved_words = py_reserved_words + [ - "include", "ctypedef", "cdef", "cpdef", - "cimport", "DEF", "IF", "ELIF", "ELSE" -] - - -class Method(object): - - def __init__(self, name, **kwargs): - self.name = name - self.kwargs = kwargs or None - self.__name__ = name # for Plex tracing - - def __call__(self, stream, text): - method = getattr(stream, self.name) - # self.kwargs is almost always unused => avoid call overhead - return method(text, **self.kwargs) if self.kwargs is not None else method(text) - - def __copy__(self): - return self # immutable, no need to copy - - def __deepcopy__(self, memo): - return self # immutable, no need to copy - - -#------------------------------------------------------------------ - -class CompileTimeScope(object): - - def __init__(self, outer=None): - self.entries = {} - self.outer = outer - - def declare(self, name, value): - self.entries[name] = value - - def update(self, other): - self.entries.update(other) - - def lookup_here(self, name): - return self.entries[name] - - def __contains__(self, name): - return name in self.entries - - def lookup(self, name): - try: - return self.lookup_here(name) - except KeyError: - outer = self.outer - if outer: - return outer.lookup(name) - else: - raise - - -def initial_compile_time_env(): - benv = CompileTimeScope() - names = ('UNAME_SYSNAME', 'UNAME_NODENAME', 'UNAME_RELEASE', 'UNAME_VERSION', 'UNAME_MACHINE') - for name, value in zip(names, platform.uname()): - benv.declare(name, value) - try: - import __builtin__ as builtins - except ImportError: - import builtins - - names = ( - 'False', 'True', - 'abs', 'all', 'any', 'ascii', 'bin', 'bool', 'bytearray', 'bytes', - 'chr', 'cmp', 'complex', 'dict', 'divmod', 'enumerate', 'filter', - 'float', 'format', 'frozenset', 'hash', 'hex', 'int', 'len', - 'list', 'map', 'max', 'min', 'oct', 'ord', 'pow', 'range', - 'repr', 'reversed', 'round', 'set', 'slice', 'sorted', 'str', - 'sum', 'tuple', 'zip', - ### defined below in a platform independent way - # 'long', 'unicode', 'reduce', 'xrange' - ) - - for name in names: - try: - benv.declare(name, getattr(builtins, name)) - except AttributeError: - # ignore, likely Py3 - pass - - # Py2/3 adaptations - from functools import reduce - benv.declare('reduce', reduce) - benv.declare('unicode', getattr(builtins, 'unicode', getattr(builtins, 'str'))) - benv.declare('long', getattr(builtins, 'long', getattr(builtins, 'int'))) - benv.declare('xrange', getattr(builtins, 'xrange', getattr(builtins, 'range'))) - - denv = CompileTimeScope(benv) - return denv - - -#------------------------------------------------------------------ - -class SourceDescriptor(object): - """ - A SourceDescriptor should be considered immutable. 
- """ - filename = None - - _file_type = 'pyx' - - _escaped_description = None - _cmp_name = '' - def __str__(self): - assert False # To catch all places where a descriptor is used directly as a filename - - def set_file_type_from_name(self, filename): - name, ext = os.path.splitext(filename) - self._file_type = ext in ('.pyx', '.pxd', '.py') and ext[1:] or 'pyx' - - def is_cython_file(self): - return self._file_type in ('pyx', 'pxd') - - def is_python_file(self): - return self._file_type == 'py' - - def get_escaped_description(self): - if self._escaped_description is None: - esc_desc = \ - self.get_description().encode('ASCII', 'replace').decode("ASCII") - # Use forward slashes on Windows since these paths - # will be used in the #line directives in the C/C++ files. - self._escaped_description = esc_desc.replace('\\', '/') - return self._escaped_description - - def __gt__(self, other): - # this is only used to provide some sort of order - try: - return self._cmp_name > other._cmp_name - except AttributeError: - return False - - def __lt__(self, other): - # this is only used to provide some sort of order - try: - return self._cmp_name < other._cmp_name - except AttributeError: - return False - - def __le__(self, other): - # this is only used to provide some sort of order - try: - return self._cmp_name <= other._cmp_name - except AttributeError: - return False - - def __copy__(self): - return self # immutable, no need to copy - - def __deepcopy__(self, memo): - return self # immutable, no need to copy - - -class FileSourceDescriptor(SourceDescriptor): - """ - Represents a code source. A code source is a more generic abstraction - for a "filename" (as sometimes the code doesn't come from a file). - Instances of code sources are passed to Scanner.__init__ as the - optional name argument and will be passed back when asking for - the position()-tuple. - """ - def __init__(self, filename, path_description=None): - filename = Utils.decode_filename(filename) - self.path_description = path_description or filename - self.filename = filename - # Prefer relative paths to current directory (which is most likely the project root) over absolute paths. 
- workdir = os.path.abspath('.') + os.sep - self.file_path = filename[len(workdir):] if filename.startswith(workdir) else filename - self.set_file_type_from_name(filename) - self._cmp_name = filename - self._lines = {} - - def get_lines(self, encoding=None, error_handling=None): - # we cache the lines only the second time this is called, in - # order to save memory when they are only used once - key = (encoding, error_handling) - try: - lines = self._lines[key] - if lines is not None: - return lines - except KeyError: - pass - - with Utils.open_source_file(self.filename, encoding=encoding, error_handling=error_handling) as f: - lines = list(f) - - if key in self._lines: - self._lines[key] = lines - else: - # do not cache the first access, but remember that we - # already read it once - self._lines[key] = None - return lines - - def get_description(self): - try: - return os.path.relpath(self.path_description) - except ValueError: - # path not under current directory => use complete file path - return self.path_description - - def get_error_description(self): - path = self.filename - cwd = Utils.decode_filename(os.getcwd() + os.path.sep) - if path.startswith(cwd): - return path[len(cwd):] - return path - - def get_filenametable_entry(self): - return self.file_path - - def __eq__(self, other): - return isinstance(other, FileSourceDescriptor) and self.filename == other.filename - - def __hash__(self): - return hash(self.filename) - - def __repr__(self): - return "<FileSourceDescriptor:%s>" % self.filename - - -class StringSourceDescriptor(SourceDescriptor): - """ - Instances of this class can be used instead of a filenames if the - code originates from a string object. - """ - def __init__(self, name, code): - self.name = name - #self.set_file_type_from_name(name) - self.codelines = [x + "\n" for x in code.split("\n")] - self._cmp_name = name - - def get_lines(self, encoding=None, error_handling=None): - if not encoding: - return self.codelines - else: - return [line.encode(encoding, error_handling).decode(encoding) - for line in self.codelines] - - def get_description(self): - return self.name - - get_error_description = get_description - - def get_filenametable_entry(self): - return "stringsource" - - def __hash__(self): - return id(self) - # Do not hash on the name, an identical string source should be the - # same object (name is often defaulted in other places) - # return hash(self.name) - - def __eq__(self, other): - return isinstance(other, StringSourceDescriptor) and self.name == other.name - - def __repr__(self): - return "<StringSourceDescriptor:%s>" % self.name - - -#------------------------------------------------------------------ - -class PyrexScanner(Scanner): - # context Context Compilation context - # included_files [string] Files included with 'include' statement - # compile_time_env dict Environment for conditional compilation - # compile_time_eval boolean In a true conditional compilation context - # compile_time_expr boolean In a compile-time expression context - - def __init__(self, file, filename, parent_scanner=None, - scope=None, context=None, source_encoding=None, parse_comments=True, initial_pos=None): - Scanner.__init__(self, get_lexicon(), file, filename, initial_pos) - - if filename.is_python_file(): - self.in_python_file = True - self.keywords = set(py_reserved_words) - else: - self.in_python_file = False - self.keywords = set(pyx_reserved_words) - - self.async_enabled = 0 - - if parent_scanner: - self.context = parent_scanner.context - self.included_files = 
parent_scanner.included_files - self.compile_time_env = parent_scanner.compile_time_env - self.compile_time_eval = parent_scanner.compile_time_eval - self.compile_time_expr = parent_scanner.compile_time_expr - - if parent_scanner.async_enabled: - self.enter_async() - else: - self.context = context - self.included_files = scope.included_files - self.compile_time_env = initial_compile_time_env() - self.compile_time_eval = 1 - self.compile_time_expr = 0 - if getattr(context.options, 'compile_time_env', None): - self.compile_time_env.update(context.options.compile_time_env) - self.parse_comments = parse_comments - self.source_encoding = source_encoding - self.trace = trace_scanner - self.indentation_stack = [0] - self.indentation_char = None - self.bracket_nesting_level = 0 - - self.begin('INDENT') - self.sy = '' - self.next() - - def commentline(self, text): - if self.parse_comments: - self.produce('commentline', text) - - def strip_underscores(self, text, symbol): - self.produce(symbol, text.replace('_', '')) - - def current_level(self): - return self.indentation_stack[-1] - - def open_bracket_action(self, text): - self.bracket_nesting_level += 1 - return text - - def close_bracket_action(self, text): - self.bracket_nesting_level -= 1 - return text - - def newline_action(self, text): - if self.bracket_nesting_level == 0: - self.begin('INDENT') - self.produce('NEWLINE', '') - - string_states = { - "'": 'SQ_STRING', - '"': 'DQ_STRING', - "'''": 'TSQ_STRING', - '"""': 'TDQ_STRING' - } - - def begin_string_action(self, text): - while text[:1] in any_string_prefix: - text = text[1:] - self.begin(self.string_states[text]) - self.produce('BEGIN_STRING') - - def end_string_action(self, text): - self.begin('') - self.produce('END_STRING') - - def unclosed_string_action(self, text): - self.end_string_action(text) - self.error("Unclosed string literal") - - def indentation_action(self, text): - self.begin('') - # Indentation within brackets should be ignored. - #if self.bracket_nesting_level > 0: - # return - # Check that tabs and spaces are being used consistently. 
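#        Worked trace: starting from the initial indentation_stack of [0],
#            a line indented by 4 spaces     -> push 4, produce INDENT   (stack [0, 4])
#            a following line indented by 8  -> push 8, produce INDENT   (stack [0, 4, 8])
#            a following line indented by 4  -> pop 8, produce DEDENT    (stack [0, 4])
#            a following line indented by 2  -> pop 4, produce DEDENT, then 2 != current level 0
#                                               raises "Inconsistent indentation"
#        and mixing tabs with spaces within or across lines raises "Mixed use of tabs and spaces".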
- if text: - c = text[0] - #print "Scanner.indentation_action: indent with", repr(c) ### - if self.indentation_char is None: - self.indentation_char = c - #print "Scanner.indentation_action: setting indent_char to", repr(c) - else: - if self.indentation_char != c: - self.error("Mixed use of tabs and spaces") - if text.replace(c, "") != "": - self.error("Mixed use of tabs and spaces") - # Figure out how many indents/dedents to do - current_level = self.current_level() - new_level = len(text) - #print "Changing indent level from", current_level, "to", new_level ### - if new_level == current_level: - return - elif new_level > current_level: - #print "...pushing level", new_level ### - self.indentation_stack.append(new_level) - self.produce('INDENT', '') - else: - while new_level < self.current_level(): - #print "...popping level", self.indentation_stack[-1] ### - self.indentation_stack.pop() - self.produce('DEDENT', '') - #print "...current level now", self.current_level() ### - if new_level != self.current_level(): - self.error("Inconsistent indentation") - - def eof_action(self, text): - while len(self.indentation_stack) > 1: - self.produce('DEDENT', '') - self.indentation_stack.pop() - self.produce('EOF', '') - - def next(self): - try: - sy, systring = self.read() - except UnrecognizedInput: - self.error("Unrecognized character") - return # just a marker, error() always raises - if sy == IDENT: - if systring in self.keywords: - if systring == u'print' and print_function in self.context.future_directives: - self.keywords.discard('print') - elif systring == u'exec' and self.context.language_level >= 3: - self.keywords.discard('exec') - else: - sy = systring - systring = self.context.intern_ustring(systring) - self.sy = sy - self.systring = systring - if False: # debug_scanner: - _, line, col = self.position() - if not self.systring or self.sy == self.systring: - t = self.sy - else: - t = "%s %s" % (self.sy, self.systring) - print("--- %3d %2d %s" % (line, col, t)) - - def peek(self): - saved = self.sy, self.systring - self.next() - next = self.sy, self.systring - self.unread(*next) - self.sy, self.systring = saved - return next - - def put_back(self, sy, systring): - self.unread(self.sy, self.systring) - self.sy = sy - self.systring = systring - - def unread(self, token, value): - # This method should be added to Plex - self.queue.insert(0, (token, value)) - - def error(self, message, pos=None, fatal=True): - if pos is None: - pos = self.position() - if self.sy == 'INDENT': - error(pos, "Possible inconsistent indentation") - err = error(pos, message) - if fatal: raise err - - def expect(self, what, message=None): - if self.sy == what: - self.next() - else: - self.expected(what, message) - - def expect_keyword(self, what, message=None): - if self.sy == IDENT and self.systring == what: - self.next() - else: - self.expected(what, message) - - def expected(self, what, message=None): - if message: - self.error(message) - else: - if self.sy == IDENT: - found = self.systring - else: - found = self.sy - self.error("Expected '%s', found '%s'" % (what, found)) - - def expect_indent(self): - self.expect('INDENT', "Expected an increase in indentation level") - - def expect_dedent(self): - self.expect('DEDENT', "Expected a decrease in indentation level") - - def expect_newline(self, message="Expected a newline", ignore_semicolon=False): - # Expect either a newline or end of file - useless_trailing_semicolon = None - if ignore_semicolon and self.sy == ';': - useless_trailing_semicolon = self.position() - 
self.next() - if self.sy != 'EOF': - self.expect('NEWLINE', message) - if useless_trailing_semicolon is not None: - warning(useless_trailing_semicolon, "useless trailing semicolon") - - def enter_async(self): - self.async_enabled += 1 - if self.async_enabled == 1: - self.keywords.add('async') - self.keywords.add('await') - - def exit_async(self): - assert self.async_enabled > 0 - self.async_enabled -= 1 - if not self.async_enabled: - self.keywords.discard('await') - self.keywords.discard('async') - if self.sy in ('async', 'await'): - self.sy, self.systring = IDENT, self.context.intern_ustring(self.sy) diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Utility/Embed.c b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Utility/Embed.c deleted file mode 100644 index 8f7e8f46e9235b16891f8be6b756e7c84cd85fce..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Utility/Embed.c +++ /dev/null @@ -1,219 +0,0 @@ -//////////////////// MainFunction //////////////////// - -#ifdef __FreeBSD__ -#include <floatingpoint.h> -#endif - -#if PY_MAJOR_VERSION < 3 -int %(main_method)s(int argc, char** argv) { -#elif defined(WIN32) || defined(MS_WINDOWS) -int %(wmain_method)s(int argc, wchar_t **argv) { -#else -static int __Pyx_main(int argc, wchar_t **argv) { -#endif - /* 754 requires that FP exceptions run in "no stop" mode by default, - * and until C vendors implement C99's ways to control FP exceptions, - * Python requires non-stop mode. Alas, some platforms enable FP - * exceptions by default. Here we disable them. - */ -#ifdef __FreeBSD__ - fp_except_t m; - - m = fpgetmask(); - fpsetmask(m & ~FP_X_OFL); -#endif - if (argc && argv) - Py_SetProgramName(argv[0]); - Py_Initialize(); - if (argc && argv) - PySys_SetArgv(argc, argv); - { /* init module '%(module_name)s' as '__main__' */ - PyObject* m = NULL; - %(module_is_main)s = 1; - #if PY_MAJOR_VERSION < 3 - init%(module_name)s(); - #elif CYTHON_PEP489_MULTI_PHASE_INIT - m = PyInit_%(module_name)s(); - if (!PyModule_Check(m)) { - PyModuleDef *mdef = (PyModuleDef *) m; - PyObject *modname = PyUnicode_FromString("__main__"); - m = NULL; - if (modname) { - // FIXME: not currently calling PyModule_FromDefAndSpec() here because we do not have a module spec! - // FIXME: not currently setting __file__, __path__, __spec__, ... - m = PyModule_NewObject(modname); - Py_DECREF(modname); - if (m) PyModule_ExecDef(m, mdef); - } - } - #else - m = PyInit_%(module_name)s(); - #endif - if (PyErr_Occurred()) { - PyErr_Print(); /* This exits with the right code if SystemExit. */ - #if PY_MAJOR_VERSION < 3 - if (Py_FlushLine()) PyErr_Clear(); - #endif - return 1; - } - Py_XDECREF(m); - } -#if PY_VERSION_HEX < 0x03060000 - Py_Finalize(); -#else - if (Py_FinalizeEx() < 0) - return 2; -#endif - return 0; -} - - -#if PY_MAJOR_VERSION >= 3 && !defined(WIN32) && !defined(MS_WINDOWS) -#include <locale.h> - -static wchar_t* -__Pyx_char2wchar(char* arg) -{ - wchar_t *res; -#ifdef HAVE_BROKEN_MBSTOWCS - /* Some platforms have a broken implementation of - * mbstowcs which does not count the characters that - * would result from conversion. Use an upper bound. 
- */ - size_t argsize = strlen(arg); -#else - size_t argsize = mbstowcs(NULL, arg, 0); -#endif - size_t count; - unsigned char *in; - wchar_t *out; -#ifdef HAVE_MBRTOWC - mbstate_t mbs; -#endif - if (argsize != (size_t)-1) { - res = (wchar_t *)malloc((argsize+1)*sizeof(wchar_t)); - if (!res) - goto oom; - count = mbstowcs(res, arg, argsize+1); - if (count != (size_t)-1) { - wchar_t *tmp; - /* Only use the result if it contains no - surrogate characters. */ - for (tmp = res; *tmp != 0 && - (*tmp < 0xd800 || *tmp > 0xdfff); tmp++) - ; - if (*tmp == 0) - return res; - } - free(res); - } - /* Conversion failed. Fall back to escaping with surrogateescape. */ -#ifdef HAVE_MBRTOWC - /* Try conversion with mbrtwoc (C99), and escape non-decodable bytes. */ - - /* Overallocate; as multi-byte characters are in the argument, the - actual output could use less memory. */ - argsize = strlen(arg) + 1; - res = (wchar_t *)malloc(argsize*sizeof(wchar_t)); - if (!res) goto oom; - in = (unsigned char*)arg; - out = res; - memset(&mbs, 0, sizeof mbs); - while (argsize) { - size_t converted = mbrtowc(out, (char*)in, argsize, &mbs); - if (converted == 0) - /* Reached end of string; null char stored. */ - break; - if (converted == (size_t)-2) { - /* Incomplete character. This should never happen, - since we provide everything that we have - - unless there is a bug in the C library, or I - misunderstood how mbrtowc works. */ - fprintf(stderr, "unexpected mbrtowc result -2\\n"); - free(res); - return NULL; - } - if (converted == (size_t)-1) { - /* Conversion error. Escape as UTF-8b, and start over - in the initial shift state. */ - *out++ = 0xdc00 + *in++; - argsize--; - memset(&mbs, 0, sizeof mbs); - continue; - } - if (*out >= 0xd800 && *out <= 0xdfff) { - /* Surrogate character. Escape the original - byte sequence with surrogateescape. */ - argsize -= converted; - while (converted--) - *out++ = 0xdc00 + *in++; - continue; - } - /* successfully converted some bytes */ - in += converted; - argsize -= converted; - out++; - } -#else - /* Cannot use C locale for escaping; manually escape as if charset - is ASCII (i.e. escape all bytes > 128. This will still roundtrip - correctly in the locale's charset, which must be an ASCII superset. */ - res = (wchar_t *)malloc((strlen(arg)+1)*sizeof(wchar_t)); - if (!res) goto oom; - in = (unsigned char*)arg; - out = res; - while(*in) - if(*in < 128) - *out++ = *in++; - else - *out++ = 0xdc00 + *in++; - *out = 0; -#endif - return res; -oom: - fprintf(stderr, "out of memory\\n"); - return NULL; -} - -int -%(main_method)s(int argc, char **argv) -{ - if (!argc) { - return __Pyx_main(0, NULL); - } - else { - int i, res; - wchar_t **argv_copy = (wchar_t **)malloc(sizeof(wchar_t*)*argc); - /* We need a second copy, as Python might modify the first one. 
*/ - wchar_t **argv_copy2 = (wchar_t **)malloc(sizeof(wchar_t*)*argc); - char *oldloc = strdup(setlocale(LC_ALL, NULL)); - if (!argv_copy || !argv_copy2 || !oldloc) { - fprintf(stderr, "out of memory\\n"); - free(argv_copy); - free(argv_copy2); - free(oldloc); - return 1; - } - res = 0; - setlocale(LC_ALL, ""); - for (i = 0; i < argc; i++) { - argv_copy2[i] = argv_copy[i] = __Pyx_char2wchar(argv[i]); - if (!argv_copy[i]) res = 1; /* failure, but continue to simplify cleanup */ - } - setlocale(LC_ALL, oldloc); - free(oldloc); - if (res == 0) - res = __Pyx_main(argc, argv_copy); - for (i = 0; i < argc; i++) { -#if PY_VERSION_HEX < 0x03050000 - free(argv_copy2[i]); -#else - PyMem_RawFree(argv_copy2[i]); -#endif - } - free(argv_copy); - free(argv_copy2); - return res; - } -} -#endif diff --git a/spaces/asciicorp/hotel-chat/tools_extended.py b/spaces/asciicorp/hotel-chat/tools_extended.py deleted file mode 100644 index 652c1818320df7983fcddcdede7bc212fd10c4c6..0000000000000000000000000000000000000000 --- a/spaces/asciicorp/hotel-chat/tools_extended.py +++ /dev/null @@ -1,309 +0,0 @@ -from langchain.agents import Tool -from greeting import greet_llm -from datetime import datetime, timedelta -import sqlite3 -import uuid -import os -from vector_qa import hotel_details_chain, room_details_chain - - -def empty_cart(): - conn = sqlite3.connect('hotel.db') - c = conn.cursor() - c.execute("DELETE FROM reservation_cart") - conn.commit() - conn.close() - -# empty the cart at startup -empty_cart() - -if os.path.exists('conversation_id.txt'): - os.remove('conversation_id.txt') - -def get_current_date(string): - now = datetime.now() - today = now.strftime("%A, %Y/%m/%d") - tomorrow = (now + timedelta(days=1)).strftime("%A, %Y/%m/%d") - this_saturday = (now + timedelta((5-now.weekday()) % 7)).strftime("%A, %Y/%m/%d") - this_sunday = (now + timedelta((6-now.weekday()) % 7)).strftime("%A, %Y/%m/%d") - return f"today's date is {today}\ntomorrow is {tomorrow}\nthis weekend is {this_saturday} - {this_sunday}" - -def get_room_options(string): - conn = sqlite3.connect('hotel.db') - cursor = conn.cursor() - room_options = cursor.execute("SELECT * FROM room_options").fetchall() - - options_str = "Available room options are:\n" - for option in room_options: - room_type = option[0] - num_people = option[1] - price = option[2] - intro = option[4] - options_str += f"{room_type} which can accommodate {num_people} priced at {price}. Its intro is {intro}\n" - - conn.close() - return options_str - -def get_available_rooms(date): - conn = sqlite3.connect('hotel.db') - cursor = conn.cursor() - room_options = cursor.execute("SELECT * FROM room_options").fetchall() - available_rooms_str = "Available rooms on {}:\n".format(date) - - for option in room_options: - room_type_abbrev = ''.join([word[0] for word in option[0].split()]) - num_rooms = option[3] - - available_rooms = cursor.execute(f"SELECT COUNT(*) FROM {room_type_abbrev}_bookings WHERE date = '{date}' AND availability = 1").fetchone()[0] - room_number = cursor.execute(f"SELECT room_number FROM {room_type_abbrev}_bookings WHERE date = '{date}' AND availability = 1 ORDER BY room_number ASC LIMIT 1").fetchone() - available_rooms_str += f"We have {available_rooms} of {option[0]} on {date}. 
The first available room number is {room_number[0]}.\n" - - conn.close() - return available_rooms_str - - -def start_booking(string): - conn = sqlite3.connect('hotel.db') - cursor = conn.cursor() - - conversation_id = str(uuid.uuid4()) - with open('conversation_id.txt', 'w') as f: - f.write(conversation_id) - - date_time = datetime.now().strftime('%Y/%m/%d %H:%M:%S') - cursor.execute(f"INSERT INTO reservation_cart VALUES ('{date_time}', '{conversation_id}', NULL, NULL, NULL, NULL, NULL)") - - conn.commit() - conn.close() - - return "generate a final answer saying you have started the booking process and you must ask for customer name." - -def add_name(name): - if not name: - return "Please ask the customer for their name." - - try: - with open('conversation_id.txt', 'r') as f: - conversation_id = f.read().strip() - except FileNotFoundError: - return "I do not need to save your name." - - conn = sqlite3.connect('hotel.db') - cursor = conn.cursor() - cursor.execute(f"UPDATE reservation_cart SET customer_name = '{name}' WHERE conversation_id = '{conversation_id}'") - conn.commit() - conn.close() - - return f"generate a final answer informing {name} has been added and you must ask the customer the number of people that will be staying." - -def add_people(num_people): - if not num_people: - return "Please ask the customer for the number of people that will be staying." - try: - with open('conversation_id.txt', 'r') as f: - conversation_id = f.read().strip() - except FileNotFoundError: - return "I do not need to save the number of people." - - conn = sqlite3.connect('hotel.db') - cursor = conn.cursor() - cursor.execute(f"UPDATE reservation_cart SET num_people = {num_people} WHERE conversation_id = '{conversation_id}'") - conn.commit() - conn.close() - - return f"generate a final answer informing you have added the number of people as {num_people} and you must ask the customer for the arrival date" - -def add_arrival_date(date): - if not date: - return "Please ask the customer for their arrival date." - try: - with open('conversation_id.txt', 'r') as f: - conversation_id = f.read().strip() - except FileNotFoundError: - return "I do not need to save the arrival date." - - conn = sqlite3.connect('hotel.db') - cursor = conn.cursor() - cursor.execute(f"UPDATE reservation_cart SET arrival_date = '{date}' WHERE conversation_id = '{conversation_id}'") - conn.commit() - conn.close() - - return "generate a final answer informing you have saved the arrival date and you must ask the customer for the departure date" - -def add_departure_date(date): - if not date: - return "Please ask the customer for their departure date." - try: - with open('conversation_id.txt', 'r') as f: - conversation_id = f.read().strip() - except FileNotFoundError: - return "I do not need to save the departure date." - - conn = sqlite3.connect('hotel.db') - cursor = conn.cursor() - cursor.execute(f"UPDATE reservation_cart SET departure_date = '{date}' WHERE conversation_id = '{conversation_id}'") - conn.commit() - conn.close() - - return "generate a final answer informing you have saved the departure date and you must ask the customer for the preferred room type" - -def add_room_type(room_type): - if not room_type: - return "Please ask the customer for their selected room type." - - try: - with open('conversation_id.txt', 'r') as f: - conversation_id = f.read().strip() - except FileNotFoundError: - return "No need to save room type."
- - conn = sqlite3.connect('hotel.db') - cursor = conn.cursor() - - # Make the given room type lowercase for case-insensitive comparison - room_type_lower = room_type.lower() - - # Get valid room type options from the room_options table - valid_options = [row[0] for row in cursor.execute("SELECT room_type FROM room_options").fetchall()] - - # Check if the supplied room type matches any of the valid options (case-insensitive) - matches = [option for option in valid_options if option.lower() == room_type_lower] - if not matches: - return f"Generate a final answer saying Unfortunately, {room_type} is not a valid room option. The valid room options are: {', '.join(valid_options)}" - - selected_option = matches[0] - - # Check if there are available rooms in the correct {room_type_abbrev}_bookings table - room_type_abbrev = ''.join([word[0] for word in selected_option.split()]) - arrival_date = cursor.execute(f"SELECT arrival_date FROM reservation_cart WHERE conversation_id = '{conversation_id}'").fetchone()[0] - departure_date = cursor.execute(f"SELECT departure_date FROM reservation_cart WHERE conversation_id = '{conversation_id}'").fetchone()[0] - available_rooms = cursor.execute(f"SELECT room_number FROM {room_type_abbrev.lower()}_bookings WHERE date BETWEEN '{arrival_date}' AND '{departure_date}' AND availability = 1").fetchall() - if not available_rooms: - return f"Generate a final answer saying Unfortunately, there are no available rooms of {selected_option} from {arrival_date} to {departure_date} Please pick a different room option." - - # Add the room type to reservation cart - cursor.execute(f"UPDATE reservation_cart SET room_type = '{selected_option}' WHERE conversation_id = '{conversation_id}'") - - # Write the lowest room number of available room of the selected option to a temp file - lowest_room_number = available_rooms[0][0] - with open('room_num.txt', 'w') as f: - f.write(lowest_room_number) - - num_people = cursor.execute(f"SELECT num_people FROM reservation_cart WHERE conversation_id = '{conversation_id}'").fetchone()[0] - name = cursor.execute(f"SELECT customer_name FROM reservation_cart WHERE conversation_id = '{conversation_id}'").fetchone()[0] - arrival_date = cursor.execute(f"SELECT arrival_date FROM reservation_cart WHERE conversation_id = '{conversation_id}'").fetchone()[0] - departure_date = cursor.execute(f"SELECT departure_date FROM reservation_cart WHERE conversation_id = '{conversation_id}'").fetchone()[0] - - conn.commit() - conn.close() - - return f"Generate a final answer saying Booking is almost complete Please confirm the following information: Name is {name}, arrival date is {arrival_date}, departure date is {departure_date}. {num_people} will be staying in {selected_option}." - -def confirm_booking(string): - try: - with open('conversation_id.txt', 'r') as f: - conversation_id = f.read().strip() - with open('room_num.txt', 'r') as f: - room_number = f.read().strip() - except FileNotFoundError: - return "I do not need to confirm your order yet." 
- - conn = sqlite3.connect('hotel.db') - cursor = conn.cursor() - - # Get information from reservation_cart table - customer_name = cursor.execute(f"SELECT customer_name FROM reservation_cart WHERE conversation_id = '{conversation_id}'").fetchone()[0] - arrival_date = cursor.execute(f"SELECT arrival_date FROM reservation_cart WHERE conversation_id = '{conversation_id}'").fetchone()[0] - departure_date = cursor.execute(f"SELECT departure_date FROM reservation_cart WHERE conversation_id = '{conversation_id}'").fetchone()[0] - num_people = cursor.execute(f"SELECT num_people FROM reservation_cart WHERE conversation_id = '{conversation_id}'").fetchone()[0] - room_type = cursor.execute(f"SELECT room_type FROM reservation_cart WHERE conversation_id = '{conversation_id}'").fetchone()[0] - - # Add customer to customer_info table - unique_id = uuid.uuid4().hex[:8] # Generate 8-character hexadecimal unique ID - cursor.execute(f"INSERT INTO customer_info (customer_id, customer_name, conversation_id, num_people, arrival_date, departure_date, room_type, room_number) VALUES ('{unique_id}', '{customer_name}', '{conversation_id}', {num_people}, '{arrival_date}', '{departure_date}', '{room_type}', '{room_number}')") - - # Update bookings table to set availability to 0 during dates of the reservation - room_type_abbrev = ''.join([word[0] for word in room_type.split()]).lower() - cursor.execute(f"UPDATE {room_type_abbrev}_bookings SET availability = 0, customer_id = '{unique_id}' WHERE room_number = '{room_number}' AND date BETWEEN '{arrival_date}' AND '{departure_date}'") - - # Delete temp files and empty reservation_cart table - os.remove('conversation_id.txt') - os.remove('room_num.txt') - cursor.execute(f"DELETE FROM reservation_cart WHERE conversation_id = '{conversation_id}'") - - conn.commit() - conn.close() - - return "Generate a final answer saying Booking is complete Thank you" - -def human(string): - return "Generate a final answer politely asking customer for required information" - -tools = [ - Tool( - name="Greeting", - func=greet_llm.run, - description="useful for when you need to respond to greetings, make small talk or answer questions not related to rooms. use the message you need to respond to as the input to this tool. example if the user says 'whats the meaning of life' input should be 'whats the meaning of life'", - return_direct=True, - ), - Tool( - name="Understand date", - func=get_current_date, - description="always use this tool whenever the customer mentions a day, a month or time", - ), - Tool( - name='Room details', - func=room_details_chain.run, - description="use this tool to find and provide information about rooms such as options, prices. Input should be a fully formed question.", - ), - Tool( - name='Get available rooms for a given day', - func=get_available_rooms, - description="use this tool to find which rooms are available for a given date. always use Understand date tool before using this tool. input to this tool is the date you want to check example input - 2023/06/30", - ), - Tool( - name="Start room reservation", - func=start_booking, - description="use this tool to start room reservation process", - ), - Tool( - name="Save name", - func=add_name, - description="use this tool to save customer name", - ), - Tool( - name="Save number of people", - func=add_people, - description="use this tool to save the number of people that will be staying. 
input to this tool should be an integer", - ), - Tool( - name="Save arrival date", - func=add_arrival_date, - description="use this tool to save customer arrival date. input should be in 2023/mm/dd format", - ), - Tool( - name="Save departure date", - func=add_departure_date, - description="use this tool to save customer departure date. input should be in 2023/mm/dd format", - ), - Tool( - name="Save room type", - func=add_room_type, - description="use this tool to save the room type", - ), - Tool( - name="Finish booking", - func=confirm_booking, - description="only use this tool after the customer has confirmed the booking information", - ), - Tool( - name="Ask customer", - func=human, - description="use this tool to ask any of these from customer when needed. name, number of people, arrival date, departure date, preferred room type. only ask for one item at a time.", - ), - Tool( - name="Hotel details", - func=hotel_details_chain.run, - description="use this when you need to find details about the hotel. Input should be a fully formed question.", - ), -] \ No newline at end of file diff --git a/spaces/ashercn97/AsherTesting/modules/utils.py b/spaces/ashercn97/AsherTesting/modules/utils.py deleted file mode 100644 index e257de2dcc0b7364bb953f3658ba0eeec7a80f09..0000000000000000000000000000000000000000 --- a/spaces/ashercn97/AsherTesting/modules/utils.py +++ /dev/null @@ -1,130 +0,0 @@ -import os -import re -from datetime import datetime -from pathlib import Path - -from modules import shared -from modules.logging_colors import logger - - -# Helper function to get multiple values from shared.gradio -def gradio(*keys): - if len(keys) == 1 and type(keys[0]) is list: - keys = keys[0] - - return [shared.gradio[k] for k in keys] - - -def save_file(fname, contents): - if fname == '': - logger.error('File name is empty!') - return - - root_folder = Path(__file__).resolve().parent.parent - abs_path = Path(fname).resolve() - rel_path = abs_path.relative_to(root_folder) - if rel_path.parts[0] == '..': - logger.error(f'Invalid file path: {fname}') - return - - with open(abs_path, 'w', encoding='utf-8') as f: - f.write(contents) - - logger.info(f'Saved {abs_path}.') - - -def delete_file(fname): - if fname == '': - logger.error('File name is empty!') - return - - root_folder = Path(__file__).resolve().parent.parent - abs_path = Path(fname).resolve() - rel_path = abs_path.relative_to(root_folder) - if rel_path.parts[0] == '..': - logger.error(f'Invalid file path: {fname}') - return - - if abs_path.exists(): - abs_path.unlink() - logger.info(f'Deleted {fname}.') - - -def current_time(): - return f"{datetime.now().strftime('%Y-%m-%d-%H%M%S')}" - - -def atoi(text): - return int(text) if text.isdigit() else text.lower() - - -# Replace multiple string pairs in a string -def replace_all(text, dic): - for i, j in dic.items(): - text = text.replace(i, j) - - return text - - -def natural_keys(text): - return [atoi(c) for c in re.split(r'(\d+)', text)] - - -def get_available_models(): - if shared.args.flexgen: - return sorted([re.sub('-np$', '', item.name) for item in list(Path(f'{shared.args.model_dir}/').glob('*')) if item.name.endswith('-np')], key=natural_keys) - else: - return sorted([re.sub('.pth$', '', item.name) for item in list(Path(f'{shared.args.model_dir}/').glob('*')) if not item.name.endswith(('.txt', '-np', '.pt', '.json', '.yaml'))], key=natural_keys) - - -def get_available_presets(): - return sorted(set((k.stem for k in Path('presets').glob('*.yaml'))), key=natural_keys) - - -def 
get_available_prompts(): - prompts = [] - files = set((k.stem for k in Path('prompts').glob('*.txt'))) - prompts += sorted([k for k in files if re.match('^[0-9]', k)], key=natural_keys, reverse=True) - prompts += sorted([k for k in files if re.match('^[^0-9]', k)], key=natural_keys) - prompts += ['Instruct-' + k for k in get_available_instruction_templates() if k != 'None'] - prompts += ['None'] - return prompts - - -def get_available_characters(): - paths = (x for x in Path('characters').iterdir() if x.suffix in ('.json', '.yaml', '.yml')) - return ['None'] + sorted(set((k.stem for k in paths if k.stem != "instruction-following")), key=natural_keys) - - -def get_available_instruction_templates(): - path = "characters/instruction-following" - paths = [] - if os.path.exists(path): - paths = (x for x in Path(path).iterdir() if x.suffix in ('.json', '.yaml', '.yml')) - - return ['None'] + sorted(set((k.stem for k in paths)), key=natural_keys) - - -def get_available_extensions(): - return sorted(set(map(lambda x: x.parts[1], Path('extensions').glob('*/script.py'))), key=natural_keys) - - -def get_available_loras(): - return sorted([item.name for item in list(Path(shared.args.lora_dir).glob('*')) if not item.name.endswith(('.txt', '-np', '.pt', '.json'))], key=natural_keys) - - -def get_datasets(path: str, ext: str): - # include subdirectories for raw txt files to allow training from a subdirectory of txt files - if ext == "txt": - return ['None'] + sorted(set([k.stem for k in list(Path(path).glob('txt')) + list(Path(path).glob('*/')) if k.stem != 'put-trainer-datasets-here']), key=natural_keys) - - return ['None'] + sorted(set([k.stem for k in Path(path).glob(f'*.{ext}') if k.stem != 'put-trainer-datasets-here']), key=natural_keys) - - -def get_available_chat_styles(): - return sorted(set(('-'.join(k.stem.split('-')[1:]) for k in Path('css').glob('chat_style*.css'))), key=natural_keys) - - -def get_available_sessions(): - items = sorted(set(k.stem for k in Path('logs').glob(f'session_{shared.get_mode()}*')), key=natural_keys, reverse=True) - return [item for item in items if 'autosave' in item] + [item for item in items if 'autosave' not in item] diff --git a/spaces/ashhadahsan/summarizer-space/utils/openllmapi/login.py b/spaces/ashhadahsan/summarizer-space/utils/openllmapi/login.py deleted file mode 100644 index b982123301f846f8f4a89acd41e001e4fb89e955..0000000000000000000000000000000000000000 --- a/spaces/ashhadahsan/summarizer-space/utils/openllmapi/login.py +++ /dev/null @@ -1,183 +0,0 @@ -import requests -import os -import json -import logging -import re - - -class Login: - def __init__(self, email: str, passwd: str) -> None: - # self.COOKIE_DIR = os.path.dirname(os.path.abspath(__file__)) + "/usercookies" - # self.COOKIE_PATH = self.COOKIE_DIR + f"/{email}.json" - # if not os.path.exists(self.COOKIE_DIR): - # logging.debug("Cookie directory not found, creating...") - # os.makedirs(self.COOKIE_DIR) - # logging.debug(f"Cookie store path: {self.COOKIE_DIR}") - self.DEFAULT_PATH_DIR = os.path.dirname(os.path.abspath(__file__)) + "/usercookies" - self.DEFAULT_COOKIE_PATH = self.DEFAULT_PATH_DIR + f"/{email}.json" - - self.email: str = email - self.passwd: str = passwd - self.headers = { - "Referer": "https://huggingface.co/", - "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36 Edg/112.0.1722.64", - } - self.cookies = requests.sessions.RequestsCookieJar() - - def requestsGet(self, url: str, params=None, 
allow_redirects=True) -> requests.Response: - res = requests.get( - url, - params=params, - headers=self.headers, - cookies=self.cookies, - allow_redirects=allow_redirects, - ) - self.refreshCookies(res.cookies) - return res - - def requestsPost(self, url: str, headers=None, params=None, data=None, stream=False, - allow_redirects=True) -> requests.Response: - res = requests.post( - url, - stream=stream, - params=params, - data=data, - headers=self.headers if headers == None else headers, - cookies=self.cookies, - allow_redirects=allow_redirects - ) - self.refreshCookies(res.cookies) - return res - - def refreshCookies(self, cookies: requests.sessions.RequestsCookieJar): - dic = cookies.get_dict() - for i in dic: - self.cookies.set(i, dic[i]) - - def SigninWithEmail(self): - """ - Login through your email and password. - PS: I found that it doesn't have any type of encrytion till now, - which could expose your password to the internet. - """ - url = "https://huggingface.co/login" - data = { - "username": self.email, - "password": self.passwd, - } - res = self.requestsPost(url=url, data=data, allow_redirects=False) - if res.status_code == 400: - raise Exception("wrong username or password") - - def getAuthURL(self): - url = "https://huggingface.co/chat/login" - headers = { - "Referer": "https://huggingface.co/chat/login", - "User-Agent": self.headers["User-Agent"], - "Content-Type": "application/x-www-form-urlencoded" - } - res = self.requestsPost(url, headers=headers, allow_redirects=False) - if res.status_code == 200: - # location = res.headers.get("Location", None) - location = res.json()["location"] - if location: - return location - else: - raise Exception("No authorize url found, please check your email or password.") - elif res.status_code == 303: - location = res.headers.get("Location") - if location: - return location - else: - raise Exception("No authorize url found, please check your email or password.") - else: - raise Exception("Something went wrong!") - - def grantAuth(self, url: str) -> int: - res = self.requestsGet(url, allow_redirects=False) - if res.headers.__contains__("location"): - location = res.headers["location"] - res = self.requestsGet(location, allow_redirects=False) - if res.cookies.__contains__("hf-chat"): - return 1 - # raise Exception("grantAuth fatal") - if res.status_code != 200: - raise Exception("grant auth fatal!") - csrf = re.findall('/oauth/authorize.*?name="csrf" value="(.*?)"', res.text) - if len(csrf) == 0: - raise Exception("No csrf found!") - data = { - "csrf": csrf[0] - } - - res = self.requestsPost(url, data=data, allow_redirects=False) - if res.status_code != 303: - raise Exception(f"get hf-chat cookies fatal! - {res.status_code}") - else: - location = res.headers.get("Location") - res = self.requestsGet(location, allow_redirects=False) - if res.status_code != 302: - raise Exception(f"get hf-chat cookie fatal! 
- {res.status_code}") - else: - return 1 - - def login(self) -> requests.sessions.RequestsCookieJar: - self.SigninWithEmail() - location = self.getAuthURL() - if self.grantAuth(location): - return self.cookies - else: - raise Exception(f"Grant auth fatal, please check your email or password\ncookies gained: \n{self.cookies}") - - def saveCookiesToDir(self, cookie_dir_path: str = None) -> str: - """ - cookies will be saved into: cookie_dir_path/<email>.json - """ - cookie_dir_path = self.DEFAULT_PATH_DIR if not cookie_dir_path else cookie_dir_path - if not cookie_dir_path.endswith("/"): - cookie_dir_path += "/" - cookie_path = cookie_dir_path + f"{self.email}.json" - if not os.path.exists(cookie_dir_path): - logging.info("Cookie directory not exist, creating...") - os.makedirs(cookie_dir_path) - logging.info(f"Cookie store path: {cookie_path}") - - with open(cookie_path, "w", encoding="utf-8") as f: - f.write(json.dumps(self.cookies.get_dict())) - return cookie_path - - def _getCookiePath(self, cookie_dir_path) -> str: - if not cookie_dir_path.endswith("/"): - cookie_dir_path += "/" - if not os.path.exists(cookie_dir_path): - return "" - files = os.listdir(cookie_dir_path) - for i in files: - if i == f"{self.email}.json": - return cookie_dir_path + i - return "" - - def loadCookiesFromDir(self, cookie_dir_path: str = None) -> requests.sessions.RequestsCookieJar: - """ - cookie files needs to be named as: cookie_dir_path/<email>.json - """ - cookie_dir_path = self.DEFAULT_PATH_DIR if not cookie_dir_path else cookie_dir_path - cookie_path = self._getCookiePath(cookie_dir_path) - if not cookie_path: - raise Exception(f"Cookie not found. please check the path given: {cookie_dir_path}.\n" + - f"Cookie file must be named like this: 'your_email'+'.json': '{self.email}.json'") - - with open(cookie_path, "r", encoding="utf-8") as f: - try: - js = json.loads(f.read()) - for i in js.keys(): - self.cookies.set(i, js[i]) - logging.info(f"{i} loaded") - return self.cookies - except: - raise Exception("load cookies from files fatal. Please check the format") - - -if __name__ == "__main__": - EMAIL = os.getenv("EMAIL") - PASSWD = os.getenv("PASSWD") diff --git a/spaces/ashu3984/Dialogue_summarization/app.py b/spaces/ashu3984/Dialogue_summarization/app.py deleted file mode 100644 index 7372eb2b40db71936018a43042f12a3ae3d49902..0000000000000000000000000000000000000000 --- a/spaces/ashu3984/Dialogue_summarization/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/ashu3984/Text_summarizer").launch() \ No newline at end of file diff --git a/spaces/autosummproject/autosumm/translation/translation.py b/spaces/autosummproject/autosumm/translation/translation.py deleted file mode 100644 index 6438212e628556e59cb13436a923719087bd6cf6..0000000000000000000000000000000000000000 --- a/spaces/autosummproject/autosumm/translation/translation.py +++ /dev/null @@ -1,13 +0,0 @@ -from deep_translator import GoogleTranslator -from easynmt import EasyNMT -from utils.timing import Timer - -@Timer.time_it('tradução', 'translation') -def translate(text, source_language, target_language): - try: - print("Trying to use Google Translator...") - return GoogleTranslator(source = source_language, target = target_language).translate(text) - except: - print("Google Translator is not working... 
using M2M100") - translation_model = EasyNMT('m2m_100_418M') - return translation_model.translate(text, source_lang = source_language, target_lang = target_language) \ No newline at end of file diff --git a/spaces/awaawawawa/iurf7irfuyytruyyugb/optimizedSD/optimUtils.py b/spaces/awaawawawa/iurf7irfuyytruyyugb/optimizedSD/optimUtils.py deleted file mode 100644 index 18b996792a27b6f628bda578bb2a3ec64f406f23..0000000000000000000000000000000000000000 --- a/spaces/awaawawawa/iurf7irfuyytruyyugb/optimizedSD/optimUtils.py +++ /dev/null @@ -1,73 +0,0 @@ -import os -import pandas as pd - - -def split_weighted_subprompts(text): - """ - grabs all text up to the first occurrence of ':' - uses the grabbed text as a sub-prompt, and takes the value following ':' as weight - if ':' has no value defined, defaults to 1.0 - repeats until no text remaining - """ - remaining = len(text) - prompts = [] - weights = [] - while remaining > 0: - if ":" in text: - idx = text.index(":") # first occurrence from start - # grab up to index as sub-prompt - prompt = text[:idx] - remaining -= idx - # remove from main text - text = text[idx+1:] - # find value for weight - if " " in text: - idx = text.index(" ") # first occurence - else: # no space, read to end - idx = len(text) - if idx != 0: - try: - weight = float(text[:idx]) - except: # couldn't treat as float - print(f"Warning: '{text[:idx]}' is not a value, are you missing a space?") - weight = 1.0 - else: # no value found - weight = 1.0 - # remove from main text - remaining -= idx - text = text[idx+1:] - # append the sub-prompt and its weight - prompts.append(prompt) - weights.append(weight) - else: # no : found - if len(text) > 0: # there is still text though - # take remainder as weight 1 - prompts.append(text) - weights.append(1.0) - remaining = 0 - return prompts, weights - -def logger(params, log_csv): - os.makedirs('logs', exist_ok=True) - cols = [arg for arg, _ in params.items()] - if not os.path.exists(log_csv): - df = pd.DataFrame(columns=cols) - df.to_csv(log_csv, index=False) - - df = pd.read_csv(log_csv) - for arg in cols: - if arg not in df.columns: - df[arg] = "" - df.to_csv(log_csv, index = False) - - li = {} - cols = [col for col in df.columns] - data = {arg:value for arg, value in params.items()} - for col in cols: - if col in data: - li[col] = data[col] - else: - li[col] = '' - - df = pd.DataFrame(li,index = [0]) - df.to_csv(log_csv,index=False, mode='a', header=False) \ No newline at end of file diff --git a/spaces/awacke1/ChatBotPersonalities/README.md b/spaces/awacke1/ChatBotPersonalities/README.md deleted file mode 100644 index 0b84b4696f2140b7ee2042e26e6c518399c88051..0000000000000000000000000000000000000000 --- a/spaces/awacke1/ChatBotPersonalities/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: ChatBotPersonalities -emoji: 😻 -colorFrom: green -colorTo: blue -sdk: gradio -sdk_version: 3.0.22 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/awacke1/ParallelSummaryModel/README.md b/spaces/awacke1/ParallelSummaryModel/README.md deleted file mode 100644 index b2a547de4c871b64a6b36afb48dd58a6aa58b6cf..0000000000000000000000000000000000000000 --- a/spaces/awacke1/ParallelSummaryModel/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: ⚡NLP Parallel Summary Mindful Tip Gen🚀 -emoji: ⚡🚀 -colorFrom: purple -colorTo: purple -sdk: gradio -sdk_version: 2.9.4 -app_file: app.py -pinned: false -license: mit ---- - -Check out the 
configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/badayvedat/LLaVA/llava/model/make_delta.py b/spaces/badayvedat/LLaVA/llava/model/make_delta.py deleted file mode 100644 index 4ae55d59c2c8bab80299272314a41bbeb959d8ed..0000000000000000000000000000000000000000 --- a/spaces/badayvedat/LLaVA/llava/model/make_delta.py +++ /dev/null @@ -1,52 +0,0 @@ -""" -Usage: -python3 -m llava.model.make_delta --base ~/model_weights/llama-7b --target ~/model_weights/llava-7b --delta ~/model_weights/llava-7b-delta --hub-repo-id liuhaotian/llava-7b-delta -""" -import argparse - -import torch -from tqdm import tqdm -from transformers import AutoTokenizer, AutoModelForCausalLM -from llava.model.utils import auto_upgrade - - -def make_delta(base_model_path, target_model_path, delta_path, hub_repo_id): - print("Loading base model") - base = AutoModelForCausalLM.from_pretrained( - base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True) - - print("Loading target model") - auto_upgrade(target_model_path) - target = AutoModelForCausalLM.from_pretrained(target_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True) - - print("Calculating delta") - for name, param in tqdm(target.state_dict().items(), desc="Calculating delta"): - if name not in base.state_dict(): - assert name in ['model.mm_projector.weight', 'model.mm_projector.bias'], f'{name} not in base model' - continue - if param.data.shape == base.state_dict()[name].shape: - param.data -= base.state_dict()[name] - else: - assert name in ['model.embed_tokens.weight', 'lm_head.weight'], f'{name} dimension mismatch: {param.data.shape} vs {base.state_dict()[name].shape}' - bparam = base.state_dict()[name] - param.data[:bparam.shape[0], :bparam.shape[1]] -= bparam - - print("Saving delta") - if hub_repo_id: - kwargs = {"push_to_hub": True, "repo_id": hub_repo_id} - else: - kwargs = {} - target.save_pretrained(delta_path, **kwargs) - target_tokenizer = AutoTokenizer.from_pretrained(target_model_path) - target_tokenizer.save_pretrained(delta_path, **kwargs) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--base-model-path", type=str, required=True) - parser.add_argument("--target-model-path", type=str, required=True) - parser.add_argument("--delta-path", type=str, required=True) - parser.add_argument("--hub-repo-id", type=str, default=None) - args = parser.parse_args() - - make_delta(args.base_model_path, args.target_model_path, args.delta_path, args.hub_repo_id) diff --git a/spaces/badongtakla/ithaca/ithaca/eval/__init__.py b/spaces/badongtakla/ithaca/ithaca/eval/__init__.py deleted file mode 100644 index 9b2a3f45a31d6bfc90a023c27ccfccaabdf22ba4..0000000000000000000000000000000000000000 --- a/spaces/badongtakla/ithaca/ithaca/eval/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2021 the Ithaca Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
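(For context on the make_delta.py diff above: that script only computes the base-to-target delta. A minimal sketch of the inverse step — re-applying such a delta to a base checkpoint — is given below. It is an illustration under assumptions: the apply_delta name, its argument names, and the slice-wise handling of the resized embed_tokens/lm_head weights are inferred from the delta computation, not taken from any of the deleted files.)

```python
# Hypothetical sketch: re-apply a delta produced by make_delta.py to a base model.
# apply_delta and its arguments are assumptions, not part of the deleted LLaVA code.
import torch
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoTokenizer


def apply_delta(base_model_path, delta_path, target_model_path):
    print("Loading base model")
    base = AutoModelForCausalLM.from_pretrained(
        base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)

    print("Loading delta")
    delta = AutoModelForCausalLM.from_pretrained(
        delta_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)

    base_state = base.state_dict()
    for name, param in tqdm(delta.state_dict().items(), desc="Applying delta"):
        if name not in base_state:
            # e.g. model.mm_projector.* exists only in the delta/target
            continue
        bparam = base_state[name]
        if param.data.shape == bparam.shape:
            param.data += bparam
        else:
            # embed_tokens / lm_head were saved with extra rows; add base into the overlap
            param.data[:bparam.shape[0], :bparam.shape[1]] += bparam

    print("Saving reconstructed target")
    delta.save_pretrained(target_model_path)
    AutoTokenizer.from_pretrained(delta_path).save_pretrained(target_model_path)
```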
diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/controls/DeviceOrientationControls.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/controls/DeviceOrientationControls.js deleted file mode 100644 index cb1e8b750ce2d9e110318cd676caf3279dc0048b..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/examples/js/controls/DeviceOrientationControls.js +++ /dev/null @@ -1,111 +0,0 @@ -/** - * @author richt / http://richt.me - * @author WestLangley / http://github.com/WestLangley - * - * W3C Device Orientation control (http://w3c.github.io/deviceorientation/spec-source-orientation.html) - */ - -THREE.DeviceOrientationControls = function ( object ) { - - var scope = this; - - this.object = object; - this.object.rotation.reorder( 'YXZ' ); - - this.enabled = true; - - this.deviceOrientation = {}; - this.screenOrientation = 0; - - this.alphaOffset = 0; // radians - - var onDeviceOrientationChangeEvent = function ( event ) { - - scope.deviceOrientation = event; - - }; - - var onScreenOrientationChangeEvent = function () { - - scope.screenOrientation = window.orientation || 0; - - }; - - // The angles alpha, beta and gamma form a set of intrinsic Tait-Bryan angles of type Z-X'-Y'' - - var setObjectQuaternion = function () { - - var zee = new THREE.Vector3( 0, 0, 1 ); - - var euler = new THREE.Euler(); - - var q0 = new THREE.Quaternion(); - - var q1 = new THREE.Quaternion( - Math.sqrt( 0.5 ), 0, 0, Math.sqrt( 0.5 ) ); // - PI/2 around the x-axis - - return function ( quaternion, alpha, beta, gamma, orient ) { - - euler.set( beta, alpha, - gamma, 'YXZ' ); // 'ZXY' for the device, but 'YXZ' for us - - quaternion.setFromEuler( euler ); // orient the device - - quaternion.multiply( q1 ); // camera looks out the back of the device, not the top - - quaternion.multiply( q0.setFromAxisAngle( zee, - orient ) ); // adjust for screen orientation - - }; - - }(); - - this.connect = function () { - - onScreenOrientationChangeEvent(); // run once on load - - window.addEventListener( 'orientationchange', onScreenOrientationChangeEvent, false ); - window.addEventListener( 'deviceorientation', onDeviceOrientationChangeEvent, false ); - - scope.enabled = true; - - }; - - this.disconnect = function () { - - window.removeEventListener( 'orientationchange', onScreenOrientationChangeEvent, false ); - window.removeEventListener( 'deviceorientation', onDeviceOrientationChangeEvent, false ); - - scope.enabled = false; - - }; - - this.update = function () { - - if ( scope.enabled === false ) return; - - var device = scope.deviceOrientation; - - if ( device ) { - - var alpha = device.alpha ? THREE.Math.degToRad( device.alpha ) + scope.alphaOffset : 0; // Z - - var beta = device.beta ? THREE.Math.degToRad( device.beta ) : 0; // X' - - var gamma = device.gamma ? THREE.Math.degToRad( device.gamma ) : 0; // Y'' - - var orient = scope.screenOrientation ? 
THREE.Math.degToRad( scope.screenOrientation ) : 0; // O - - setObjectQuaternion( scope.object.quaternion, alpha, beta, gamma, orient ); - - } - - - }; - - this.dispose = function () { - - scope.disconnect(); - - }; - - this.connect(); - -}; diff --git a/spaces/baulab/Erasing-Concepts-In-Diffusion/StableDiffuser.py b/spaces/baulab/Erasing-Concepts-In-Diffusion/StableDiffuser.py deleted file mode 100644 index 60b01c7b1a461d6a23b57f48365b65f80bd360aa..0000000000000000000000000000000000000000 --- a/spaces/baulab/Erasing-Concepts-In-Diffusion/StableDiffuser.py +++ /dev/null @@ -1,279 +0,0 @@ -import argparse - -import torch -from baukit import TraceDict -from diffusers import AutoencoderKL, UNet2DConditionModel -from PIL import Image -from tqdm.auto import tqdm -from transformers import CLIPTextModel, CLIPTokenizer, CLIPFeatureExtractor -from diffusers.schedulers import EulerAncestralDiscreteScheduler -from diffusers.schedulers.scheduling_ddim import DDIMScheduler -from diffusers.schedulers.scheduling_ddpm import DDPMScheduler -from diffusers.schedulers.scheduling_lms_discrete import LMSDiscreteScheduler -from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker -import util - - -def default_parser(): - - parser = argparse.ArgumentParser() - - parser.add_argument('prompts', type=str, nargs='+') - parser.add_argument('outpath', type=str) - - parser.add_argument('--images', type=str, nargs='+', default=None) - parser.add_argument('--nsteps', type=int, default=1000) - parser.add_argument('--nimgs', type=int, default=1) - parser.add_argument('--start_itr', type=int, default=0) - parser.add_argument('--return_steps', action='store_true', default=False) - parser.add_argument('--pred_x0', action='store_true', default=False) - parser.add_argument('--device', type=str, default='cuda:0') - parser.add_argument('--seed', type=int, default=42) - - return parser - - -class StableDiffuser(torch.nn.Module): - - def __init__(self, - scheduler='LMS' - ): - - super().__init__() - - # Load the autoencoder model which will be used to decode the latents into image space. - self.vae = AutoencoderKL.from_pretrained( - "CompVis/stable-diffusion-v1-4", subfolder="vae") - - # Load the tokenizer and text encoder to tokenize and encode the text. - self.tokenizer = CLIPTokenizer.from_pretrained( - "openai/clip-vit-large-patch14") - self.text_encoder = CLIPTextModel.from_pretrained( - "openai/clip-vit-large-patch14") - - # The UNet model for generating the latents. 
- self.unet = UNet2DConditionModel.from_pretrained( - "CompVis/stable-diffusion-v1-4", subfolder="unet") - - self.feature_extractor = CLIPFeatureExtractor.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="feature_extractor") - self.safety_checker = StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="safety_checker") - - if scheduler == 'LMS': - self.scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000) - elif scheduler == 'DDIM': - self.scheduler = DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler") - elif scheduler == 'DDPM': - self.scheduler = DDPMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler") - - self.eval() - - def get_noise(self, batch_size, img_size, generator=None): - - param = list(self.parameters())[0] - - return torch.randn( - (batch_size, self.unet.in_channels, img_size // 8, img_size // 8), - generator=generator).type(param.dtype).to(param.device) - - def add_noise(self, latents, noise, step): - - return self.scheduler.add_noise(latents, noise, torch.tensor([self.scheduler.timesteps[step]])) - - def text_tokenize(self, prompts): - - return self.tokenizer(prompts, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt") - - def text_detokenize(self, tokens): - - return [self.tokenizer.decode(token) for token in tokens if token != self.tokenizer.vocab_size - 1] - - def text_encode(self, tokens): - - return self.text_encoder(tokens.input_ids.to(self.unet.device))[0] - - def decode(self, latents): - - return self.vae.decode(1 / self.vae.config.scaling_factor * latents).sample - - def encode(self, tensors): - - return self.vae.encode(tensors).latent_dist.mode() * 0.18215 - - def to_image(self, image): - - image = (image / 2 + 0.5).clamp(0, 1) - image = image.detach().cpu().permute(0, 2, 3, 1).numpy() - images = (image * 255).round().astype("uint8") - pil_images = [Image.fromarray(image) for image in images] - - return pil_images - - def set_scheduler_timesteps(self, n_steps): - self.scheduler.set_timesteps(n_steps, device=self.unet.device) - - def get_initial_latents(self, n_imgs, img_size, n_prompts, generator=None): - - noise = self.get_noise(n_imgs, img_size, generator=generator).repeat(n_prompts, 1, 1, 1) - - latents = noise * self.scheduler.init_noise_sigma - - return latents - - def get_text_embeddings(self, prompts, n_imgs): - - text_tokens = self.text_tokenize(prompts) - - text_embeddings = self.text_encode(text_tokens) - - unconditional_tokens = self.text_tokenize([""] * len(prompts)) - - unconditional_embeddings = self.text_encode(unconditional_tokens) - - text_embeddings = torch.cat([unconditional_embeddings, text_embeddings]).repeat_interleave(n_imgs, dim=0) - - return text_embeddings - - def predict_noise(self, - iteration, - latents, - text_embeddings, - guidance_scale=7.5 - ): - - # expand the latents if we are doing classifier-free guidance to avoid doing two forward passes. 
- latents = torch.cat([latents] * 2) - latents = self.scheduler.scale_model_input( - latents, self.scheduler.timesteps[iteration]) - - # predict the noise residual - noise_prediction = self.unet( - latents, self.scheduler.timesteps[iteration], encoder_hidden_states=text_embeddings).sample - - # perform guidance - noise_prediction_uncond, noise_prediction_text = noise_prediction.chunk(2) - noise_prediction = noise_prediction_uncond + guidance_scale * \ - (noise_prediction_text - noise_prediction_uncond) - - return noise_prediction - - @torch.no_grad() - def diffusion(self, - latents, - text_embeddings, - end_iteration=1000, - start_iteration=0, - return_steps=False, - pred_x0=False, - trace_args=None, - show_progress=True, - **kwargs): - - latents_steps = [] - trace_steps = [] - - trace = None - - for iteration in tqdm(range(start_iteration, end_iteration), disable=not show_progress): - - if trace_args: - - trace = TraceDict(self, **trace_args) - - noise_pred = self.predict_noise( - iteration, - latents, - text_embeddings, - **kwargs) - - # compute the previous noisy sample x_t -> x_t-1 - output = self.scheduler.step(noise_pred, self.scheduler.timesteps[iteration], latents) - - if trace_args: - - trace.close() - - trace_steps.append(trace) - - latents = output.prev_sample - - if return_steps or iteration == end_iteration - 1: - - output = output.pred_original_sample if pred_x0 else latents - - if return_steps: - latents_steps.append(output.cpu()) - else: - latents_steps.append(output) - - return latents_steps, trace_steps - - @torch.no_grad() - def __call__(self, - prompts, - img_size=512, - n_steps=50, - n_imgs=1, - end_iteration=None, - generator=None, - **kwargs - ): - - assert 0 <= n_steps <= 1000 - - if not isinstance(prompts, list): - - prompts = [prompts] - - self.set_scheduler_timesteps(n_steps) - - latents = self.get_initial_latents(n_imgs, img_size, len(prompts), generator=generator) - - text_embeddings = self.get_text_embeddings(prompts,n_imgs=n_imgs) - - end_iteration = end_iteration or n_steps - - latents_steps, trace_steps = self.diffusion( - latents, - text_embeddings, - end_iteration=end_iteration, - **kwargs - ) - - latents_steps = [self.decode(latents.to(self.unet.device)) for latents in latents_steps] - images_steps = [self.to_image(latents) for latents in latents_steps] - - for i in range(len(images_steps)): - self.safety_checker = self.safety_checker.float() - safety_checker_input = self.feature_extractor(images_steps[i], return_tensors="pt").to(latents_steps[0].device) - image, has_nsfw_concept = self.safety_checker( - images=latents_steps[i].float().cpu().numpy(), clip_input=safety_checker_input.pixel_values.float() - ) - - images_steps[i][0] = self.to_image(torch.from_numpy(image))[0] - - images_steps = list(zip(*images_steps)) - - if trace_steps: - - return images_steps, trace_steps - - return images_steps - - -if __name__ == '__main__': - - parser = default_parser() - - args = parser.parse_args() - - diffuser = StableDiffuser(seed=args.seed, scheduler='DDIM').to(torch.device(args.device)).half() - - images = diffuser(args.prompts, - n_steps=args.nsteps, - n_imgs=args.nimgs, - start_iteration=args.start_itr, - return_steps=args.return_steps, - pred_x0=args.pred_x0 - ) - - util.image_grid(images, args.outpath) \ No newline at end of file diff --git a/spaces/bigjoker/stable-diffusion-webui/README.md b/spaces/bigjoker/stable-diffusion-webui/README.md deleted file mode 100644 index 55105e500ec05540f173f433aee7b7aa36c4d5df..0000000000000000000000000000000000000000 --- 
a/spaces/bigjoker/stable-diffusion-webui/README.md +++ /dev/null @@ -1,173 +0,0 @@ ---- -title: Stable Diffusion Webui -emoji: 🌖 -colorFrom: pink -colorTo: blue -sdk: gradio -sdk_version: 3.19.1 -app_file: launch.py -pinned: false -duplicated_from: user238921933/stable-diffusion-webui ---- - -# Stable Diffusion web UI -A browser interface based on Gradio library for Stable Diffusion. - -![](screenshot.png) - -## Features -[Detailed feature showcase with images](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features): -- Original txt2img and img2img modes -- One click install and run script (but you still must install python and git) -- Outpainting -- Inpainting -- Color Sketch -- Prompt Matrix -- Stable Diffusion Upscale -- Attention, specify parts of text that the model should pay more attention to - - a man in a ((tuxedo)) - will pay more attention to tuxedo - - a man in a (tuxedo:1.21) - alternative syntax - - select text and press ctrl+up or ctrl+down to automatically adjust attention to selected text (code contributed by anonymous user) -- Loopback, run img2img processing multiple times -- X/Y/Z plot, a way to draw a 3 dimensional plot of images with different parameters -- Textual Inversion - - have as many embeddings as you want and use any names you like for them - - use multiple embeddings with different numbers of vectors per token - - works with half precision floating point numbers - - train embeddings on 8GB (also reports of 6GB working) -- Extras tab with: - - GFPGAN, neural network that fixes faces - - CodeFormer, face restoration tool as an alternative to GFPGAN - - RealESRGAN, neural network upscaler - - ESRGAN, neural network upscaler with a lot of third party models - - SwinIR and Swin2SR([see here](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/2092)), neural network upscalers - - LDSR, Latent diffusion super resolution upscaling -- Resizing aspect ratio options -- Sampling method selection - - Adjust sampler eta values (noise multiplier) - - More advanced noise setting options -- Interrupt processing at any time -- 4GB video card support (also reports of 2GB working) -- Correct seeds for batches -- Live prompt token length validation -- Generation parameters - - parameters you used to generate images are saved with that image - - in PNG chunks for PNG, in EXIF for JPEG - - can drag the image to PNG info tab to restore generation parameters and automatically copy them into UI - - can be disabled in settings - - drag and drop an image/text-parameters to promptbox -- Read Generation Parameters Button, loads parameters in promptbox to UI -- Settings page -- Running arbitrary python code from UI (must run with --allow-code to enable) -- Mouseover hints for most UI elements -- Possible to change defaults/mix/max/step values for UI elements via text config -- Tiling support, a checkbox to create images that can be tiled like textures -- Progress bar and live image generation preview - - Can use a separate neural network to produce previews with almost none VRAM or compute requirement -- Negative prompt, an extra text field that allows you to list what you don't want to see in generated image -- Styles, a way to save part of prompt and easily apply them via dropdown later -- Variations, a way to generate same image but with tiny differences -- Seed resizing, a way to generate same image but at slightly different resolution -- CLIP interrogator, a button that tries to guess prompt from an image -- Prompt Editing, a way to change prompt 
mid-generation, say to start making a watermelon and switch to anime girl midway -- Batch Processing, process a group of files using img2img -- Img2img Alternative, reverse Euler method of cross attention control -- Highres Fix, a convenience option to produce high resolution pictures in one click without usual distortions -- Reloading checkpoints on the fly -- Checkpoint Merger, a tab that allows you to merge up to 3 checkpoints into one -- [Custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Scripts) with many extensions from community -- [Composable-Diffusion](https://energy-based-model.github.io/Compositional-Visual-Generation-with-Composable-Diffusion-Models/), a way to use multiple prompts at once - - separate prompts using uppercase `AND` - - also supports weights for prompts: `a cat :1.2 AND a dog AND a penguin :2.2` -- No token limit for prompts (original stable diffusion lets you use up to 75 tokens) -- DeepDanbooru integration, creates danbooru style tags for anime prompts -- [xformers](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Xformers), major speed increase for select cards: (add --xformers to commandline args) -- via extension: [History tab](https://github.com/yfszzx/stable-diffusion-webui-images-browser): view, direct and delete images conveniently within the UI -- Generate forever option -- Training tab - - hypernetworks and embeddings options - - Preprocessing images: cropping, mirroring, autotagging using BLIP or deepdanbooru (for anime) -- Clip skip -- Hypernetworks -- Loras (same as Hypernetworks but more pretty) -- A separate UI where you can choose, with preview, which embeddings, hypernetworks or Loras to add to your prompt. -- Can select to load a different VAE from settings screen -- Estimated completion time in progress bar -- API -- Support for dedicated [inpainting model](https://github.com/runwayml/stable-diffusion#inpainting-with-stable-diffusion) by RunwayML. -- via extension: [Aesthetic Gradients](https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients), a way to generate images with a specific aesthetic by using clip images embeds (implementation of [https://github.com/vicgalle/stable-diffusion-aesthetic-gradients](https://github.com/vicgalle/stable-diffusion-aesthetic-gradients)) -- [Stable Diffusion 2.0](https://github.com/Stability-AI/stablediffusion) support - see [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#stable-diffusion-20) for instructions -- [Alt-Diffusion](https://arxiv.org/abs/2211.06679) support - see [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#alt-diffusion) for instructions -- Now without any bad letters! -- Load checkpoints in safetensors format -- Eased resolution restriction: generated image's dimension must be a multiple of 8 rather than 64 -- Now with a license! -- Reorder elements in the UI from settings screen -- - -## Installation and Running -Make sure the required [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) are met and follow the instructions available for both [NVidia](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) (recommended) and [AMD](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-AMD-GPUs) GPUs. 
- -Alternatively, use online services (like Google Colab): - -- [List of Online Services](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Online-Services) - -### Automatic Installation on Windows -1. Install [Python 3.10.6](https://www.python.org/downloads/windows/), checking "Add Python to PATH" -2. Install [git](https://git-scm.com/download/win). -3. Download the stable-diffusion-webui repository, for example by running `git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git`. -4. Run `webui-user.bat` from Windows Explorer as normal, non-administrator, user. - -### Automatic Installation on Linux -1. Install the dependencies: -```bash -# Debian-based: -sudo apt install wget git python3 python3-venv -# Red Hat-based: -sudo dnf install wget git python3 -# Arch-based: -sudo pacman -S wget git python3 -``` -2. To install in `/home/$(whoami)/stable-diffusion-webui/`, run: -```bash -bash <(wget -qO- https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/webui.sh) -``` -3. Run `webui.sh`. -### Installation on Apple Silicon - -Find the instructions [here](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Installation-on-Apple-Silicon). - -## Contributing -Here's how to add code to this repo: [Contributing](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing) - -## Documentation -The documentation was moved from this README over to the project's [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki). - -## Credits -Licenses for borrowed code can be found in `Settings -> Licenses` screen, and also in `html/licenses.html` file. - -- Stable Diffusion - https://github.com/CompVis/stable-diffusion, https://github.com/CompVis/taming-transformers -- k-diffusion - https://github.com/crowsonkb/k-diffusion.git -- GFPGAN - https://github.com/TencentARC/GFPGAN.git -- CodeFormer - https://github.com/sczhou/CodeFormer -- ESRGAN - https://github.com/xinntao/ESRGAN -- SwinIR - https://github.com/JingyunLiang/SwinIR -- Swin2SR - https://github.com/mv-lab/swin2sr -- LDSR - https://github.com/Hafiidz/latent-diffusion -- MiDaS - https://github.com/isl-org/MiDaS -- Ideas for optimizations - https://github.com/basujindal/stable-diffusion -- Cross Attention layer optimization - Doggettx - https://github.com/Doggettx/stable-diffusion, original idea for prompt editing. -- Cross Attention layer optimization - InvokeAI, lstein - https://github.com/invoke-ai/InvokeAI (originally http://github.com/lstein/stable-diffusion) -- Sub-quadratic Cross Attention layer optimization - Alex Birch (https://github.com/Birch-san/diffusers/pull/1), Amin Rezaei (https://github.com/AminRezaei0x443/memory-efficient-attention) -- Textual Inversion - Rinon Gal - https://github.com/rinongal/textual_inversion (we're not using his code, but we are using his ideas). 
-- Idea for SD upscale - https://github.com/jquesnelle/txt2imghd -- Noise generation for outpainting mk2 - https://github.com/parlance-zz/g-diffuser-bot -- CLIP interrogator idea and borrowing some code - https://github.com/pharmapsychotic/clip-interrogator -- Idea for Composable Diffusion - https://github.com/energy-based-model/Compositional-Visual-Generation-with-Composable-Diffusion-Models-PyTorch -- xformers - https://github.com/facebookresearch/xformers -- DeepDanbooru - interrogator for anime diffusers https://github.com/KichangKim/DeepDanbooru -- Sampling in float32 precision from a float16 UNet - marunine for the idea, Birch-san for the example Diffusers implementation (https://github.com/Birch-san/diffusers-play/tree/92feee6) -- Instruct pix2pix - Tim Brooks (star), Aleksander Holynski (star), Alexei A. Efros (no star) - https://github.com/timothybrooks/instruct-pix2pix -- Security advice - RyotaK -- Initial Gradio script - posted on 4chan by an Anonymous user. Thank you Anonymous user. -- (You) diff --git a/spaces/bigjoker/stable-diffusion-webui/modules/styles.py b/spaces/bigjoker/stable-diffusion-webui/modules/styles.py deleted file mode 100644 index d635c0109a1afd8867ef29b2d66ad864e1658113..0000000000000000000000000000000000000000 --- a/spaces/bigjoker/stable-diffusion-webui/modules/styles.py +++ /dev/null @@ -1,87 +0,0 @@ -# We need this so Python doesn't complain about the unknown StableDiffusionProcessing-typehint at runtime -from __future__ import annotations - -import csv -import os -import os.path -import typing -import collections.abc as abc -import tempfile -import shutil - -if typing.TYPE_CHECKING: - # Only import this when code is being type-checked, it doesn't have any effect at runtime - from .processing import StableDiffusionProcessing - - -class PromptStyle(typing.NamedTuple): - name: str - prompt: str - negative_prompt: str - - -def merge_prompts(style_prompt: str, prompt: str) -> str: - if "{prompt}" in style_prompt: - res = style_prompt.replace("{prompt}", prompt) - else: - parts = filter(None, (prompt.strip(), style_prompt.strip())) - res = ", ".join(parts) - - return res - - -def apply_styles_to_prompt(prompt, styles): - for style in styles: - prompt = merge_prompts(style, prompt) - - return prompt - - -class StyleDatabase: - def __init__(self, path: str): - self.no_style = PromptStyle("None", "", "") - self.styles = {} - self.path = path - - self.reload() - - def reload(self): - self.styles.clear() - - if not os.path.exists(self.path): - return - - with open(self.path, "r", encoding="utf-8-sig", newline='') as file: - reader = csv.DictReader(file) - for row in reader: - # Support loading old CSV format with "name, text"-columns - prompt = row["prompt"] if "prompt" in row else row["text"] - negative_prompt = row.get("negative_prompt", "") - self.styles[row["name"]] = PromptStyle(row["name"], prompt, negative_prompt) - - def get_style_prompts(self, styles): - return [self.styles.get(x, self.no_style).prompt for x in styles] - - def get_negative_style_prompts(self, styles): - return [self.styles.get(x, self.no_style).negative_prompt for x in styles] - - def apply_styles_to_prompt(self, prompt, styles): - return apply_styles_to_prompt(prompt, [self.styles.get(x, self.no_style).prompt for x in styles]) - - def apply_negative_styles_to_prompt(self, prompt, styles): - return apply_styles_to_prompt(prompt, [self.styles.get(x, self.no_style).negative_prompt for x in styles]) - - def save_styles(self, path: str) -> None: - # Write to temporary file first, so we don't 
nuke the file if something goes wrong - fd, temp_path = tempfile.mkstemp(".csv") - with os.fdopen(fd, "w", encoding="utf-8-sig", newline='') as file: - # _fields is actually part of the public API: typing.NamedTuple is a replacement for collections.NamedTuple, - # and collections.NamedTuple has explicit documentation for accessing _fields. Same goes for _asdict() - writer = csv.DictWriter(file, fieldnames=PromptStyle._fields) - writer.writeheader() - writer.writerows(style._asdict() for k, style in self.styles.items()) - - # Always keep a backup file around - if os.path.exists(path): - shutil.move(path, path + ".bak") - shutil.move(temp_path, path) diff --git a/spaces/bioriAsaeru/text-to-voice/Lanschool 7.7 Torrent UPD.md b/spaces/bioriAsaeru/text-to-voice/Lanschool 7.7 Torrent UPD.md deleted file mode 100644 index 2249a3483cea386fab94f6c5598ff2434509a5ac..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Lanschool 7.7 Torrent UPD.md +++ /dev/null @@ -1,16 +0,0 @@ -<h2>Lanschool 7.7 Torrent</h2><br /><p><b><b>Download File</b> 🗹 <a href="https://urloso.com/2uyR8O">https://urloso.com/2uyR8O</a></b></p><br /><br /> -<br /> -It allows you to manage class, attendance, assignments, quizzes, reports, groups, grades, projects and many other class administration tasks. The software is available in both Windows and Linux versions. LanSchool includes database features, a calendar, attendance, homework, reference database and many other features. LanSchool 7.7 is free to students and teachers, and the company offers its paid service for both. A license for LanSchool 7.7 is available for $25 per teacher or $50 per school year. - -One feature of LanSchool 7.7 is the ability to use a USB drive as a database. This could prove useful if you are teaching in a remote location. It would allow students to access course materials from any computer that is connected to the Internet. You would simply plug in your USB drive and LanSchool would automatically recognize it. - -Another interesting feature of LanSchool 7.7 is the ability to teach a class from your desktop. You could have LanSchool start a new class and make it the default class for students. This would allow students to access your desktop, look around and access your content for the course. The school could use the space for supplemental materials, or they could even provide a "hidden desktop" with your own material. - -LanSchool 7.7 also includes the ability to email grades. Students can submit their assignments online and receive their grade directly via email. This makes it easy for students to see where they stand on assignments and make any needed corrections. Teachers can also keep track of all of their students' progress and assignments, which makes their work easier. This is especially helpful if you are teaching in a classroom with multiple sections, or if you have students in several different locations or who come from different backgrounds. - -The Teachers Edition of LanSchool 7.7 is a paid upgrade for schools. This version includes several additional features that can be useful for teachers. It includes full Windows Vista compatibility, an enhanced classroom assignment calendar, a teacher's classroom console, the ability to track students' progress and multiple user permissions for student folders. The teachers edition also includes the ability to administer student accounts, grades, assignments and much more. The teachers edition is available for $250 per school or $1,000 per year. 
- -This is the second in a series of articles on LanSchool 7.7, the free version of the software. In the first article we looked at the course and student administration features. In this article we will look at the teacher administration features. 4fefd39f24<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/bioriAsaeru/text-to-voice/Laser Printer Working Principle Pdf Download [UPD].md b/spaces/bioriAsaeru/text-to-voice/Laser Printer Working Principle Pdf Download [UPD].md deleted file mode 100644 index d6d955a6589c4d7b6886dbdb0533172c3095c39d..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Laser Printer Working Principle Pdf Download [UPD].md +++ /dev/null @@ -1,28 +0,0 @@ -<h2>Laser Printer Working Principle Pdf Download</h2><br /><p><b><b>Download Zip</b> ✔✔✔ <a href="https://urloso.com/2uyOXV">https://urloso.com/2uyOXV</a></b></p><br /><br /> - -The first laser printer in the world. The Canon LBP-2900 is a pocket laser printer that's easy to use and it provides the. - -3. Don't use your printer during the warm up time for your. Sometime, you may find that your printer is being stopped by a signal from. These do have a warm up period, so you should check. - -How to troubleshoot a laser printer that doesn't print,. If it doesn't print, try your best to disconnect all cables, remove the cover, and check any power cable. 1. That laser printer may be only sporadically stopping and starting. - -Laser Printers - -How to troubleshoot a laser printer that. The first laser printer in the world. Laser printers have an option to avoid stopping the mechanism when. It's definitely a weird problem, so don't know exactly how to troubleshoot it. - -How to Troubleshoot a Laser Printer that. How to Troubleshoot a Laser Printer that. Apr 21, 2017. The printer is getting this error because the thermal print head (HP laser print head. Laser printers use toner to print, which is a powder which is put. - -How to Troubleshoot a Laser Printer that. These can also be installed on the laser printer. Laser printer toner is a printing powder that's put on paper. It is developed using electrophotography and heating, thus. Apr 21, 2017. The printer is getting this error because the thermal print head (HP laser print head. Laser printers use toner to print, which is a powder which is put. - -Laser printers have an option to avoid stopping the mechanism when the. It's definitely a weird problem, so don't know exactly how to troubleshoot it. - -How to Troubleshoot a Laser Printer that. 3. Wait until the printer is warmed up before trying to print or copy. The first laser printer in the world. - -How to Troubleshoot a Laser Printer that. 3. Don't use your printer during the warm up time for your. Sometime, you may find that your printer is being stopped by a signal from. How to Troubleshoot a Laser Printer that. - -Laser printers have an option to avoid stopping the mechanism when. The first laser printer in the world. - -How to Troubleshoot a Laser Printer that. Laser printers have 4fefd39f24<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/brainblow/MusiCreator/audiocraft/modules/streaming.py b/spaces/brainblow/MusiCreator/audiocraft/modules/streaming.py deleted file mode 100644 index fdbdf5e90fc0c6560873d66bf273460b38e5ed7e..0000000000000000000000000000000000000000 --- a/spaces/brainblow/MusiCreator/audiocraft/modules/streaming.py +++ /dev/null @@ -1,135 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. 
-# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Streaming module API that should be implemented by all Streaming components, -""" - -from contextlib import contextmanager -import typing as tp -from torch import nn -import torch - - -State = tp.Dict[str, torch.Tensor] - - -class StreamingModule(nn.Module): - """Common API for streaming components. - - Each streaming component has a streaming state, which is just a dict[str, Tensor]. - By convention, the first dim of each tensor must be the batch size. - Don't use dots in the key names, as this would clash with submodules - (like in state_dict). - - If `self._is_streaming` is True, the component should use and remember - the proper state inside `self._streaming_state`. - - To set a streaming component in streaming state, use - - with module.streaming(): - ... - - This will automatically reset the streaming state when exiting the context manager. - This also automatically propagates to all streaming children module. - - Some module might also implement the `StreamingModule.flush` method, although - this one is trickier, as all parents module must be StreamingModule and implement - it as well for it to work properly. See `StreamingSequential` after. - """ - def __init__(self) -> None: - super().__init__() - self._streaming_state: State = {} - self._is_streaming = False - - def _apply_named_streaming(self, fn: tp.Any): - for name, module in self.named_modules(): - if isinstance(module, StreamingModule): - fn(name, module) - - def _set_streaming(self, streaming: bool): - def _set_streaming(name, module): - module._is_streaming = streaming - self._apply_named_streaming(_set_streaming) - - @contextmanager - def streaming(self): - """Context manager to enter streaming mode. Reset streaming state on exit. - """ - self._set_streaming(True) - try: - yield - finally: - self._set_streaming(False) - self.reset_streaming() - - def reset_streaming(self): - """Reset the streaming state. - """ - def _reset(name: str, module: StreamingModule): - module._streaming_state.clear() - - self._apply_named_streaming(_reset) - - def get_streaming_state(self) -> State: - """Return the streaming state, including that of sub-modules. - """ - state: State = {} - - def _add(name: str, module: StreamingModule): - if name: - name += "." - for key, value in module._streaming_state.items(): - state[name + key] = value - - self._apply_named_streaming(_add) - return state - - def set_streaming_state(self, state: State): - """Set the streaming state, including that of sub-modules. - """ - state = dict(state) - - def _set(name: str, module: StreamingModule): - if name: - name += "." - module._streaming_state.clear() - for key, value in list(state.items()): - # complexity is not ideal here, but probably fine. - if key.startswith(name): - local_key = key[len(name):] - if '.' not in local_key: - module._streaming_state[local_key] = value - del state[key] - - self._apply_named_streaming(_set) - assert len(state) == 0, list(state.keys()) - - def flush(self, x: tp.Optional[torch.Tensor] = None): - """Flush any remaining outputs that were waiting for completion. - Typically, for convolutions, this will add the final padding - and process the last buffer. - - This should take an optional argument `x`, which will be provided - if a module before this one in the streaming pipeline has already - spitted out a flushed out buffer. 
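        A minimal usage sketch (illustrative only, not taken from the library;
        assumes ``module`` is a concrete ``StreamingModule`` in a streaming
        pipeline and ``chunks`` is an iterable of tensors whose first dim is
        the batch size):

            outputs = []
            with module.streaming():
                for chunk in chunks:
                    outputs.append(module(chunk))
                tail = module.flush()
                if tail is not None:
                    outputs.append(tail)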
- """ - if x is None: - return None - else: - return self(x) - - -class StreamingSequential(StreamingModule, nn.Sequential): - """A streaming compatible alternative of `nn.Sequential`. - """ - def flush(self, x: tp.Optional[torch.Tensor] = None): - for module in self: - if isinstance(module, StreamingModule): - x = module.flush(x) - elif x is not None: - x = module(x) - return x diff --git a/spaces/cfwef/gpt/crazy_functions/test_project/cpp/cppipc/buffer.cpp b/spaces/cfwef/gpt/crazy_functions/test_project/cpp/cppipc/buffer.cpp deleted file mode 100644 index 0ac0fa7bc3ced0447ba4caa359355dd4252670b3..0000000000000000000000000000000000000000 --- a/spaces/cfwef/gpt/crazy_functions/test_project/cpp/cppipc/buffer.cpp +++ /dev/null @@ -1,87 +0,0 @@ -#include "libipc/buffer.h" -#include "libipc/utility/pimpl.h" - -#include <cstring> - -namespace ipc { - -bool operator==(buffer const & b1, buffer const & b2) { - return (b1.size() == b2.size()) && (std::memcmp(b1.data(), b2.data(), b1.size()) == 0); -} - -bool operator!=(buffer const & b1, buffer const & b2) { - return !(b1 == b2); -} - -class buffer::buffer_ : public pimpl<buffer_> { -public: - void* p_; - std::size_t s_; - void* a_; - buffer::destructor_t d_; - - buffer_(void* p, std::size_t s, buffer::destructor_t d, void* a) - : p_(p), s_(s), a_(a), d_(d) { - } - - ~buffer_() { - if (d_ == nullptr) return; - d_((a_ == nullptr) ? p_ : a_, s_); - } -}; - -buffer::buffer() - : buffer(nullptr, 0, nullptr, nullptr) { -} - -buffer::buffer(void* p, std::size_t s, destructor_t d) - : p_(p_->make(p, s, d, nullptr)) { -} - -buffer::buffer(void* p, std::size_t s, destructor_t d, void* additional) - : p_(p_->make(p, s, d, additional)) { -} - -buffer::buffer(void* p, std::size_t s) - : buffer(p, s, nullptr) { -} - -buffer::buffer(char const & c) - : buffer(const_cast<char*>(&c), 1) { -} - -buffer::buffer(buffer&& rhs) - : buffer() { - swap(rhs); -} - -buffer::~buffer() { - p_->clear(); -} - -void buffer::swap(buffer& rhs) { - std::swap(p_, rhs.p_); -} - -buffer& buffer::operator=(buffer rhs) { - swap(rhs); - return *this; -} - -bool buffer::empty() const noexcept { - return (impl(p_)->p_ == nullptr) || (impl(p_)->s_ == 0); -} - -void* buffer::data() noexcept { - return impl(p_)->p_; -} - -void const * buffer::data() const noexcept { - return impl(p_)->p_; -} - -std::size_t buffer::size() const noexcept { - return impl(p_)->s_; -} - -} // namespace ipc diff --git a/spaces/chansung/LLaMA2-Story-Showcase/README.md b/spaces/chansung/LLaMA2-Story-Showcase/README.md deleted file mode 100644 index 30c1880012b1d840b6c9c0164ca77b8b79bbba94..0000000000000000000000000000000000000000 --- a/spaces/chansung/LLaMA2-Story-Showcase/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: LLaMA2 Story Showcase -emoji: 📖 -colorFrom: indigo -colorTo: pink -sdk: gradio -sdk_version: 3.40.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/chansung/llm-discord-bot/Dockerfile b/spaces/chansung/llm-discord-bot/Dockerfile deleted file mode 100644 index f27362d34fd147d54e525be9784427485ed1fafc..0000000000000000000000000000000000000000 --- a/spaces/chansung/llm-discord-bot/Dockerfile +++ /dev/null @@ -1,25 +0,0 @@ -FROM python:3.10 - -RUN useradd -m -u 1000 user -USER user - -WORKDIR /LLM-As-Chatbot - -RUN git clone https://github.com/deep-diver/LLM-As-Chatbot.git . 
-RUN pip install --no-cache-dir --upgrade -r requirements.txt - -COPY --chown=user health_check_200.py health_check_200.py -COPY --chown=user entry_script.sh entry_script.sh -RUN chmod +x ./entry_script.sh - -ENV HF_HOME=./ -ENV LLMCHAT_APP_MODE=DISCORD -ENV DISCORD_BOT_MAX_WORKERS=1 -ENV DISCORD_BOT_LOAD_MODE=CPU - -# Full list of supported models can be found -# at https://github.com/deep-diver/LLM-As-Chatbot/blob/main/model_cards.json -ENV DISCORD_BOT_MODEL_NAME=gpt4-alpaca-7b - -# Also, DISCORD_BOT_TOKEN should be set as HF Space Secret -CMD ./entry_script.sh \ No newline at end of file diff --git a/spaces/chasemcdo/hf_localai/pkg/gallery/gallery_suite_test.go b/spaces/chasemcdo/hf_localai/pkg/gallery/gallery_suite_test.go deleted file mode 100644 index 44256bc27e97051f7ff47dbefc9f80da95b2b2aa..0000000000000000000000000000000000000000 --- a/spaces/chasemcdo/hf_localai/pkg/gallery/gallery_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package gallery_test - -import ( - "testing" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" -) - -func TestGallery(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Gallery test suite") -} diff --git a/spaces/chendl/compositional_test/transformers/examples/pytorch/language-modeling/README.md b/spaces/chendl/compositional_test/transformers/examples/pytorch/language-modeling/README.md deleted file mode 100644 index 3069fe9eb974c1c6505328670b514c05316bc4e7..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/pytorch/language-modeling/README.md +++ /dev/null @@ -1,195 +0,0 @@ -<!--- -Copyright 2020 The HuggingFace Team. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. ---> - -## Language model training - -Fine-tuning (or training from scratch) the library models for language modeling on a text dataset for GPT, GPT-2, -ALBERT, BERT, DistilBERT, RoBERTa, XLNet... GPT and GPT-2 are trained or fine-tuned using a causal language modeling -(CLM) loss while ALBERT, BERT, DistilBERT and RoBERTa are trained or fine-tuned using a masked language modeling (MLM) -loss. XLNet uses permutation language modeling (PLM), you can find more information about the differences between those -objectives in our [model summary](https://huggingface.co/transformers/model_summary.html). - -There are two sets of scripts provided. The first set leverages the Trainer API. The second set with `no_trainer` in the suffix uses a custom training loop and leverages the 🤗 Accelerate library . Both sets use the 🤗 Datasets library. You can easily customize them to your needs if you need extra processing on your datasets. - -**Note:** The old script `run_language_modeling.py` is still available [here](https://github.com/huggingface/transformers/blob/main/examples/legacy/run_language_modeling.py). - -The following examples, will run on datasets hosted on our [hub](https://huggingface.co/datasets) or with your own -text files for training and validation. We give examples of both below. 
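Before diving into the scripts, the sketch below shows what the CLM and MLM objectives boil down to at the API level. It is illustrative only — the checkpoints and the toy sentence are placeholders, and the example scripts take care of chunking, padding and masking for you:

```python
from transformers import AutoModelForCausalLM, AutoModelForMaskedLM, AutoTokenizer

# Causal LM (GPT-2 style): labels are the input ids themselves; the model
# shifts them internally so each position predicts the next token.
tok = AutoTokenizer.from_pretrained("gpt2")
clm = AutoModelForCausalLM.from_pretrained("gpt2")
batch = tok("Language modeling is fun", return_tensors="pt")
clm_loss = clm(**batch, labels=batch["input_ids"]).loss

# Masked LM (RoBERTa style): hide one token and score only that position;
# -100 tells the loss to ignore every other position.
tok = AutoTokenizer.from_pretrained("roberta-base")
mlm = AutoModelForMaskedLM.from_pretrained("roberta-base")
batch = tok("Language modeling is fun", return_tensors="pt")
masked = batch["input_ids"].clone()
masked[0, 4] = tok.mask_token_id  # position chosen arbitrarily for the demo
labels = batch["input_ids"].clone()
labels[masked != tok.mask_token_id] = -100
mlm_loss = mlm(input_ids=masked, attention_mask=batch["attention_mask"], labels=labels).loss
```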
- -### GPT-2/GPT and causal language modeling - -The following example fine-tunes GPT-2 on WikiText-2. We're using the raw WikiText-2 (no tokens were replaced before -the tokenization). The loss here is that of causal language modeling. - -```bash -python run_clm.py \ - --model_name_or_path gpt2 \ - --dataset_name wikitext \ - --dataset_config_name wikitext-2-raw-v1 \ - --per_device_train_batch_size 8 \ - --per_device_eval_batch_size 8 \ - --do_train \ - --do_eval \ - --output_dir /tmp/test-clm -``` - -This takes about half an hour to train on a single K80 GPU and about one minute for the evaluation to run. It reaches -a score of ~20 perplexity once fine-tuned on the dataset. - -To run on your own training and validation files, use the following command: - -```bash -python run_clm.py \ - --model_name_or_path gpt2 \ - --train_file path_to_train_file \ - --validation_file path_to_validation_file \ - --per_device_train_batch_size 8 \ - --per_device_eval_batch_size 8 \ - --do_train \ - --do_eval \ - --output_dir /tmp/test-clm -``` - -This uses the built in HuggingFace `Trainer` for training. If you want to use a custom training loop, you can utilize or adapt the `run_clm_no_trainer.py` script. Take a look at the script for a list of supported arguments. An example is shown below: - -```bash -python run_clm_no_trainer.py \ - --dataset_name wikitext \ - --dataset_config_name wikitext-2-raw-v1 \ - --model_name_or_path gpt2 \ - --output_dir /tmp/test-clm -``` - -### RoBERTa/BERT/DistilBERT and masked language modeling - -The following example fine-tunes RoBERTa on WikiText-2. Here too, we're using the raw WikiText-2. The loss is different -as BERT/RoBERTa have a bidirectional mechanism; we're therefore using the same loss that was used during their -pre-training: masked language modeling. - -In accordance to the RoBERTa paper, we use dynamic masking rather than static masking. The model may, therefore, -converge slightly slower (over-fitting takes more epochs). - -```bash -python run_mlm.py \ - --model_name_or_path roberta-base \ - --dataset_name wikitext \ - --dataset_config_name wikitext-2-raw-v1 \ - --per_device_train_batch_size 8 \ - --per_device_eval_batch_size 8 \ - --do_train \ - --do_eval \ - --output_dir /tmp/test-mlm -``` - -To run on your own training and validation files, use the following command: - -```bash -python run_mlm.py \ - --model_name_or_path roberta-base \ - --train_file path_to_train_file \ - --validation_file path_to_validation_file \ - --per_device_train_batch_size 8 \ - --per_device_eval_batch_size 8 \ - --do_train \ - --do_eval \ - --output_dir /tmp/test-mlm -``` - -If your dataset is organized with one sample per line, you can use the `--line_by_line` flag (otherwise the script -concatenates all texts and then splits them in blocks of the same length). - -This uses the built in HuggingFace `Trainer` for training. If you want to use a custom training loop, you can utilize or adapt the `run_mlm_no_trainer.py` script. Take a look at the script for a list of supported arguments. An example is shown below: - -```bash -python run_mlm_no_trainer.py \ - --dataset_name wikitext \ - --dataset_config_name wikitext-2-raw-v1 \ - --model_name_or_path roberta-base \ - --output_dir /tmp/test-mlm -``` - -**Note:** On TPU, you should use the flag `--pad_to_max_length` in conjunction with the `--line_by_line` flag to make -sure all your batches have the same length. - -### Whole word masking - -This part was moved to `examples/research_projects/mlm_wwm`. 
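As an aside on the masking discussion above: the dynamic masking used by `run_mlm.py` is provided by the library's `DataCollatorForLanguageModeling`, which re-draws the masked positions every time a batch is assembled. A minimal sketch (the checkpoint and sentence are placeholders):

```python
from transformers import AutoTokenizer, DataCollatorForLanguageModeling

tokenizer = AutoTokenizer.from_pretrained("roberta-base")
collator = DataCollatorForLanguageModeling(
    tokenizer=tokenizer, mlm=True, mlm_probability=0.15
)

# The same example gets a fresh random mask each time it is collated,
# which is what makes the masking "dynamic" rather than static.
features = [tokenizer("Dynamic masking draws a new mask for every epoch.")]
print(collator(features)["input_ids"])
print(collator(features)["input_ids"])
```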
- -### XLNet and permutation language modeling - -XLNet uses a different training objective, which is permutation language modeling. It is an autoregressive method -to learn bidirectional contexts by maximizing the expected likelihood over all permutations of the input -sequence factorization order. - -We use the `--plm_probability` flag to define the ratio of length of a span of masked tokens to surrounding -context length for permutation language modeling. - -The `--max_span_length` flag may also be used to limit the length of a span of masked tokens used -for permutation language modeling. - -Here is how to fine-tune XLNet on wikitext-2: - -```bash -python run_plm.py \ - --model_name_or_path=xlnet-base-cased \ - --dataset_name wikitext \ - --dataset_config_name wikitext-2-raw-v1 \ - --per_device_train_batch_size 8 \ - --per_device_eval_batch_size 8 \ - --do_train \ - --do_eval \ - --output_dir /tmp/test-plm -``` - -To fine-tune it on your own training and validation file, run: - -```bash -python run_plm.py \ - --model_name_or_path=xlnet-base-cased \ - --train_file path_to_train_file \ - --validation_file path_to_validation_file \ - --per_device_train_batch_size 8 \ - --per_device_eval_batch_size 8 \ - --do_train \ - --do_eval \ - --output_dir /tmp/test-plm -``` - -If your dataset is organized with one sample per line, you can use the `--line_by_line` flag (otherwise the script -concatenates all texts and then splits them in blocks of the same length). - -**Note:** On TPU, you should use the flag `--pad_to_max_length` in conjunction with the `--line_by_line` flag to make -sure all your batches have the same length. - -## Streaming - -To use the streaming dataset mode which can be very useful for large datasets, add `--streaming` to the command line. This is currently supported by `run_mlm.py` and `run_clm.py`. - -## Low Cpu Memory Usage - -To use low cpu memory mode which can be very useful for LLM, add `--low_cpu_mem_usage` to the command line. This is currently supported by `run_clm.py`,`run_mlm.py`, `run_plm.py`,`run_mlm_no_trainer.py` and `run_clm_no_trainer.py`. - -## Creating a model on the fly - -When training a model from scratch, configuration values may be overridden with the help of `--config_overrides`: - - -```bash -python run_clm.py --model_type gpt2 --tokenizer_name gpt2 \ --config_overrides="n_embd=1024,n_head=16,n_layer=48,n_positions=102" \ -[...] -``` - -This feature is only available in `run_clm.py`, `run_plm.py` and `run_mlm.py`. diff --git a/spaces/chendl/compositional_test/transformers/examples/research_projects/bertabs/run_summarization.py b/spaces/chendl/compositional_test/transformers/examples/research_projects/bertabs/run_summarization.py deleted file mode 100644 index 82ef8ab39ea9b72249faa483ebfb37fd5ef5ba59..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/research_projects/bertabs/run_summarization.py +++ /dev/null @@ -1,347 +0,0 @@ -#! 
/usr/bin/python3 -import argparse -import logging -import os -import sys -from collections import namedtuple - -import torch -from modeling_bertabs import BertAbs, build_predictor -from torch.utils.data import DataLoader, SequentialSampler -from tqdm import tqdm - -from transformers import BertTokenizer - -from .utils_summarization import ( - CNNDMDataset, - build_mask, - compute_token_type_ids, - encode_for_summarization, - truncate_or_pad, -) - - -logger = logging.getLogger(__name__) -logging.basicConfig(stream=sys.stdout, level=logging.INFO) - - -Batch = namedtuple("Batch", ["document_names", "batch_size", "src", "segs", "mask_src", "tgt_str"]) - - -def evaluate(args): - tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", do_lower_case=True) - model = BertAbs.from_pretrained("remi/bertabs-finetuned-extractive-abstractive-summarization") - model.to(args.device) - model.eval() - - symbols = { - "BOS": tokenizer.vocab["[unused0]"], - "EOS": tokenizer.vocab["[unused1]"], - "PAD": tokenizer.vocab["[PAD]"], - } - - if args.compute_rouge: - reference_summaries = [] - generated_summaries = [] - - import nltk - import rouge - - nltk.download("punkt") - rouge_evaluator = rouge.Rouge( - metrics=["rouge-n", "rouge-l"], - max_n=2, - limit_length=True, - length_limit=args.beam_size, - length_limit_type="words", - apply_avg=True, - apply_best=False, - alpha=0.5, # Default F1_score - weight_factor=1.2, - stemming=True, - ) - - # these (unused) arguments are defined to keep the compatibility - # with the legacy code and will be deleted in a next iteration. - args.result_path = "" - args.temp_dir = "" - - data_iterator = build_data_iterator(args, tokenizer) - predictor = build_predictor(args, tokenizer, symbols, model) - - logger.info("***** Running evaluation *****") - logger.info(" Number examples = %d", len(data_iterator.dataset)) - logger.info(" Batch size = %d", args.batch_size) - logger.info("") - logger.info("***** Beam Search parameters *****") - logger.info(" Beam size = %d", args.beam_size) - logger.info(" Minimum length = %d", args.min_length) - logger.info(" Maximum length = %d", args.max_length) - logger.info(" Alpha (length penalty) = %.2f", args.alpha) - logger.info(" Trigrams %s be blocked", ("will" if args.block_trigram else "will NOT")) - - for batch in tqdm(data_iterator): - batch_data = predictor.translate_batch(batch) - translations = predictor.from_batch(batch_data) - summaries = [format_summary(t) for t in translations] - save_summaries(summaries, args.summaries_output_dir, batch.document_names) - - if args.compute_rouge: - reference_summaries += batch.tgt_str - generated_summaries += summaries - - if args.compute_rouge: - scores = rouge_evaluator.get_scores(generated_summaries, reference_summaries) - str_scores = format_rouge_scores(scores) - save_rouge_scores(str_scores) - print(str_scores) - - -def save_summaries(summaries, path, original_document_name): - """Write the summaries in fies that are prefixed by the original - files' name with the `_summary` appended. - - Attributes: - original_document_names: List[string] - Name of the document that was summarized. - path: string - Path were the summaries will be written - summaries: List[string] - The summaries that we produced. - """ - for summary, document_name in zip(summaries, original_document_name): - # Prepare the summary file's name - if "." in document_name: - bare_document_name = ".".join(document_name.split(".")[:-1]) - extension = document_name.split(".")[-1] - name = bare_document_name + "_summary." 
+ extension - else: - name = document_name + "_summary" - - file_path = os.path.join(path, name) - with open(file_path, "w") as output: - output.write(summary) - - -def format_summary(translation): - """Transforms the output of the `from_batch` function - into nicely formatted summaries. - """ - raw_summary, _, _ = translation - summary = ( - raw_summary.replace("[unused0]", "") - .replace("[unused3]", "") - .replace("[PAD]", "") - .replace("[unused1]", "") - .replace(r" +", " ") - .replace(" [unused2] ", ". ") - .replace("[unused2]", "") - .strip() - ) - - return summary - - -def format_rouge_scores(scores): - return """\n -****** ROUGE SCORES ****** - -** ROUGE 1 -F1 >> {:.3f} -Precision >> {:.3f} -Recall >> {:.3f} - -** ROUGE 2 -F1 >> {:.3f} -Precision >> {:.3f} -Recall >> {:.3f} - -** ROUGE L -F1 >> {:.3f} -Precision >> {:.3f} -Recall >> {:.3f}""".format( - scores["rouge-1"]["f"], - scores["rouge-1"]["p"], - scores["rouge-1"]["r"], - scores["rouge-2"]["f"], - scores["rouge-2"]["p"], - scores["rouge-2"]["r"], - scores["rouge-l"]["f"], - scores["rouge-l"]["p"], - scores["rouge-l"]["r"], - ) - - -def save_rouge_scores(str_scores): - with open("rouge_scores.txt", "w") as output: - output.write(str_scores) - - -# -# LOAD the dataset -# - - -def build_data_iterator(args, tokenizer): - dataset = load_and_cache_examples(args, tokenizer) - sampler = SequentialSampler(dataset) - - def collate_fn(data): - return collate(data, tokenizer, block_size=512, device=args.device) - - iterator = DataLoader( - dataset, - sampler=sampler, - batch_size=args.batch_size, - collate_fn=collate_fn, - ) - - return iterator - - -def load_and_cache_examples(args, tokenizer): - dataset = CNNDMDataset(args.documents_dir) - return dataset - - -def collate(data, tokenizer, block_size, device): - """Collate formats the data passed to the data loader. - - In particular we tokenize the data batch after batch to avoid keeping them - all in memory. We output the data as a namedtuple to fit the original BertAbs's - API. - """ - data = [x for x in data if not len(x[1]) == 0] # remove empty_files - names = [name for name, _, _ in data] - summaries = [" ".join(summary_list) for _, _, summary_list in data] - - encoded_text = [encode_for_summarization(story, summary, tokenizer) for _, story, summary in data] - encoded_stories = torch.tensor( - [truncate_or_pad(story, block_size, tokenizer.pad_token_id) for story, _ in encoded_text] - ) - encoder_token_type_ids = compute_token_type_ids(encoded_stories, tokenizer.cls_token_id) - encoder_mask = build_mask(encoded_stories, tokenizer.pad_token_id) - - batch = Batch( - document_names=names, - batch_size=len(encoded_stories), - src=encoded_stories.to(device), - segs=encoder_token_type_ids.to(device), - mask_src=encoder_mask.to(device), - tgt_str=summaries, - ) - - return batch - - -def decode_summary(summary_tokens, tokenizer): - """Decode the summary and return it in a format - suitable for evaluation. - """ - summary_tokens = summary_tokens.to("cpu").numpy() - summary = tokenizer.decode(summary_tokens) - sentences = summary.split(".") - sentences = [s + "." 
for s in sentences] - return sentences - - -def main(): - """The main function defines the interface with the users.""" - parser = argparse.ArgumentParser() - parser.add_argument( - "--documents_dir", - default=None, - type=str, - required=True, - help="The folder where the documents to summarize are located.", - ) - parser.add_argument( - "--summaries_output_dir", - default=None, - type=str, - required=False, - help="The folder in wich the summaries should be written. Defaults to the folder where the documents are", - ) - parser.add_argument( - "--compute_rouge", - default=False, - type=bool, - required=False, - help="Compute the ROUGE metrics during evaluation. Only available for the CNN/DailyMail dataset.", - ) - # EVALUATION options - parser.add_argument( - "--no_cuda", - default=False, - type=bool, - help="Whether to force the execution on CPU.", - ) - parser.add_argument( - "--batch_size", - default=4, - type=int, - help="Batch size per GPU/CPU for training.", - ) - # BEAM SEARCH arguments - parser.add_argument( - "--min_length", - default=50, - type=int, - help="Minimum number of tokens for the summaries.", - ) - parser.add_argument( - "--max_length", - default=200, - type=int, - help="Maixmum number of tokens for the summaries.", - ) - parser.add_argument( - "--beam_size", - default=5, - type=int, - help="The number of beams to start with for each example.", - ) - parser.add_argument( - "--alpha", - default=0.95, - type=float, - help="The value of alpha for the length penalty in the beam search.", - ) - parser.add_argument( - "--block_trigram", - default=True, - type=bool, - help="Whether to block the existence of repeating trigrams in the text generated by beam search.", - ) - args = parser.parse_args() - - # Select device (distibuted not available) - args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") - - # Check the existence of directories - if not args.summaries_output_dir: - args.summaries_output_dir = args.documents_dir - - if not documents_dir_is_valid(args.documents_dir): - raise FileNotFoundError( - "We could not find the directory you specified for the documents to summarize, or it was empty. Please" - " specify a valid path." 
- ) - os.makedirs(args.summaries_output_dir, exist_ok=True) - - evaluate(args) - - -def documents_dir_is_valid(path): - if not os.path.exists(path): - return False - - file_list = os.listdir(path) - if len(file_list) == 0: - return False - - return True - - -if __name__ == "__main__": - main() diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/click/_compat.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/click/_compat.py deleted file mode 100644 index 9153d150ce67a708f920fcf9c606970fc061f816..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/click/_compat.py +++ /dev/null @@ -1,623 +0,0 @@ -import codecs -import io -import os -import re -import sys -import typing as t -from weakref import WeakKeyDictionary - -CYGWIN = sys.platform.startswith("cygwin") -WIN = sys.platform.startswith("win") -auto_wrap_for_ansi: t.Optional[t.Callable[[t.TextIO], t.TextIO]] = None -_ansi_re = re.compile(r"\033\[[;?0-9]*[a-zA-Z]") - - -def _make_text_stream( - stream: t.BinaryIO, - encoding: t.Optional[str], - errors: t.Optional[str], - force_readable: bool = False, - force_writable: bool = False, -) -> t.TextIO: - if encoding is None: - encoding = get_best_encoding(stream) - if errors is None: - errors = "replace" - return _NonClosingTextIOWrapper( - stream, - encoding, - errors, - line_buffering=True, - force_readable=force_readable, - force_writable=force_writable, - ) - - -def is_ascii_encoding(encoding: str) -> bool: - """Checks if a given encoding is ascii.""" - try: - return codecs.lookup(encoding).name == "ascii" - except LookupError: - return False - - -def get_best_encoding(stream: t.IO[t.Any]) -> str: - """Returns the default stream encoding if not found.""" - rv = getattr(stream, "encoding", None) or sys.getdefaultencoding() - if is_ascii_encoding(rv): - return "utf-8" - return rv - - -class _NonClosingTextIOWrapper(io.TextIOWrapper): - def __init__( - self, - stream: t.BinaryIO, - encoding: t.Optional[str], - errors: t.Optional[str], - force_readable: bool = False, - force_writable: bool = False, - **extra: t.Any, - ) -> None: - self._stream = stream = t.cast( - t.BinaryIO, _FixupStream(stream, force_readable, force_writable) - ) - super().__init__(stream, encoding, errors, **extra) - - def __del__(self) -> None: - try: - self.detach() - except Exception: - pass - - def isatty(self) -> bool: - # https://bitbucket.org/pypy/pypy/issue/1803 - return self._stream.isatty() - - -class _FixupStream: - """The new io interface needs more from streams than streams - traditionally implement. As such, this fix-up code is necessary in - some circumstances. - - The forcing of readable and writable flags are there because some tools - put badly patched objects on sys (one such offender are certain version - of jupyter notebook). 
- """ - - def __init__( - self, - stream: t.BinaryIO, - force_readable: bool = False, - force_writable: bool = False, - ): - self._stream = stream - self._force_readable = force_readable - self._force_writable = force_writable - - def __getattr__(self, name: str) -> t.Any: - return getattr(self._stream, name) - - def read1(self, size: int) -> bytes: - f = getattr(self._stream, "read1", None) - - if f is not None: - return t.cast(bytes, f(size)) - - return self._stream.read(size) - - def readable(self) -> bool: - if self._force_readable: - return True - x = getattr(self._stream, "readable", None) - if x is not None: - return t.cast(bool, x()) - try: - self._stream.read(0) - except Exception: - return False - return True - - def writable(self) -> bool: - if self._force_writable: - return True - x = getattr(self._stream, "writable", None) - if x is not None: - return t.cast(bool, x()) - try: - self._stream.write("") # type: ignore - except Exception: - try: - self._stream.write(b"") - except Exception: - return False - return True - - def seekable(self) -> bool: - x = getattr(self._stream, "seekable", None) - if x is not None: - return t.cast(bool, x()) - try: - self._stream.seek(self._stream.tell()) - except Exception: - return False - return True - - -def _is_binary_reader(stream: t.IO[t.Any], default: bool = False) -> bool: - try: - return isinstance(stream.read(0), bytes) - except Exception: - return default - # This happens in some cases where the stream was already - # closed. In this case, we assume the default. - - -def _is_binary_writer(stream: t.IO[t.Any], default: bool = False) -> bool: - try: - stream.write(b"") - except Exception: - try: - stream.write("") - return False - except Exception: - pass - return default - return True - - -def _find_binary_reader(stream: t.IO[t.Any]) -> t.Optional[t.BinaryIO]: - # We need to figure out if the given stream is already binary. - # This can happen because the official docs recommend detaching - # the streams to get binary streams. Some code might do this, so - # we need to deal with this case explicitly. - if _is_binary_reader(stream, False): - return t.cast(t.BinaryIO, stream) - - buf = getattr(stream, "buffer", None) - - # Same situation here; this time we assume that the buffer is - # actually binary in case it's closed. - if buf is not None and _is_binary_reader(buf, True): - return t.cast(t.BinaryIO, buf) - - return None - - -def _find_binary_writer(stream: t.IO[t.Any]) -> t.Optional[t.BinaryIO]: - # We need to figure out if the given stream is already binary. - # This can happen because the official docs recommend detaching - # the streams to get binary streams. Some code might do this, so - # we need to deal with this case explicitly. - if _is_binary_writer(stream, False): - return t.cast(t.BinaryIO, stream) - - buf = getattr(stream, "buffer", None) - - # Same situation here; this time we assume that the buffer is - # actually binary in case it's closed. - if buf is not None and _is_binary_writer(buf, True): - return t.cast(t.BinaryIO, buf) - - return None - - -def _stream_is_misconfigured(stream: t.TextIO) -> bool: - """A stream is misconfigured if its encoding is ASCII.""" - # If the stream does not have an encoding set, we assume it's set - # to ASCII. This appears to happen in certain unittest - # environments. It's not quite clear what the correct behavior is - # but this at least will force Click to recover somehow. 
- return is_ascii_encoding(getattr(stream, "encoding", None) or "ascii") - - -def _is_compat_stream_attr(stream: t.TextIO, attr: str, value: t.Optional[str]) -> bool: - """A stream attribute is compatible if it is equal to the - desired value or the desired value is unset and the attribute - has a value. - """ - stream_value = getattr(stream, attr, None) - return stream_value == value or (value is None and stream_value is not None) - - -def _is_compatible_text_stream( - stream: t.TextIO, encoding: t.Optional[str], errors: t.Optional[str] -) -> bool: - """Check if a stream's encoding and errors attributes are - compatible with the desired values. - """ - return _is_compat_stream_attr( - stream, "encoding", encoding - ) and _is_compat_stream_attr(stream, "errors", errors) - - -def _force_correct_text_stream( - text_stream: t.IO[t.Any], - encoding: t.Optional[str], - errors: t.Optional[str], - is_binary: t.Callable[[t.IO[t.Any], bool], bool], - find_binary: t.Callable[[t.IO[t.Any]], t.Optional[t.BinaryIO]], - force_readable: bool = False, - force_writable: bool = False, -) -> t.TextIO: - if is_binary(text_stream, False): - binary_reader = t.cast(t.BinaryIO, text_stream) - else: - text_stream = t.cast(t.TextIO, text_stream) - # If the stream looks compatible, and won't default to a - # misconfigured ascii encoding, return it as-is. - if _is_compatible_text_stream(text_stream, encoding, errors) and not ( - encoding is None and _stream_is_misconfigured(text_stream) - ): - return text_stream - - # Otherwise, get the underlying binary reader. - possible_binary_reader = find_binary(text_stream) - - # If that's not possible, silently use the original reader - # and get mojibake instead of exceptions. - if possible_binary_reader is None: - return text_stream - - binary_reader = possible_binary_reader - - # Default errors to replace instead of strict in order to get - # something that works. - if errors is None: - errors = "replace" - - # Wrap the binary stream in a text stream with the correct - # encoding parameters. 
- return _make_text_stream( - binary_reader, - encoding, - errors, - force_readable=force_readable, - force_writable=force_writable, - ) - - -def _force_correct_text_reader( - text_reader: t.IO[t.Any], - encoding: t.Optional[str], - errors: t.Optional[str], - force_readable: bool = False, -) -> t.TextIO: - return _force_correct_text_stream( - text_reader, - encoding, - errors, - _is_binary_reader, - _find_binary_reader, - force_readable=force_readable, - ) - - -def _force_correct_text_writer( - text_writer: t.IO[t.Any], - encoding: t.Optional[str], - errors: t.Optional[str], - force_writable: bool = False, -) -> t.TextIO: - return _force_correct_text_stream( - text_writer, - encoding, - errors, - _is_binary_writer, - _find_binary_writer, - force_writable=force_writable, - ) - - -def get_binary_stdin() -> t.BinaryIO: - reader = _find_binary_reader(sys.stdin) - if reader is None: - raise RuntimeError("Was not able to determine binary stream for sys.stdin.") - return reader - - -def get_binary_stdout() -> t.BinaryIO: - writer = _find_binary_writer(sys.stdout) - if writer is None: - raise RuntimeError("Was not able to determine binary stream for sys.stdout.") - return writer - - -def get_binary_stderr() -> t.BinaryIO: - writer = _find_binary_writer(sys.stderr) - if writer is None: - raise RuntimeError("Was not able to determine binary stream for sys.stderr.") - return writer - - -def get_text_stdin( - encoding: t.Optional[str] = None, errors: t.Optional[str] = None -) -> t.TextIO: - rv = _get_windows_console_stream(sys.stdin, encoding, errors) - if rv is not None: - return rv - return _force_correct_text_reader(sys.stdin, encoding, errors, force_readable=True) - - -def get_text_stdout( - encoding: t.Optional[str] = None, errors: t.Optional[str] = None -) -> t.TextIO: - rv = _get_windows_console_stream(sys.stdout, encoding, errors) - if rv is not None: - return rv - return _force_correct_text_writer(sys.stdout, encoding, errors, force_writable=True) - - -def get_text_stderr( - encoding: t.Optional[str] = None, errors: t.Optional[str] = None -) -> t.TextIO: - rv = _get_windows_console_stream(sys.stderr, encoding, errors) - if rv is not None: - return rv - return _force_correct_text_writer(sys.stderr, encoding, errors, force_writable=True) - - -def _wrap_io_open( - file: t.Union[str, "os.PathLike[str]", int], - mode: str, - encoding: t.Optional[str], - errors: t.Optional[str], -) -> t.IO[t.Any]: - """Handles not passing ``encoding`` and ``errors`` in binary mode.""" - if "b" in mode: - return open(file, mode) - - return open(file, mode, encoding=encoding, errors=errors) - - -def open_stream( - filename: "t.Union[str, os.PathLike[str]]", - mode: str = "r", - encoding: t.Optional[str] = None, - errors: t.Optional[str] = "strict", - atomic: bool = False, -) -> t.Tuple[t.IO[t.Any], bool]: - binary = "b" in mode - filename = os.fspath(filename) - - # Standard streams first. These are simple because they ignore the - # atomic flag. Use fsdecode to handle Path("-"). - if os.fsdecode(filename) == "-": - if any(m in mode for m in ["w", "a", "x"]): - if binary: - return get_binary_stdout(), False - return get_text_stdout(encoding=encoding, errors=errors), False - if binary: - return get_binary_stdin(), False - return get_text_stdin(encoding=encoding, errors=errors), False - - # Non-atomic writes directly go out through the regular open functions. 
- if not atomic: - return _wrap_io_open(filename, mode, encoding, errors), True - - # Some usability stuff for atomic writes - if "a" in mode: - raise ValueError( - "Appending to an existing file is not supported, because that" - " would involve an expensive `copy`-operation to a temporary" - " file. Open the file in normal `w`-mode and copy explicitly" - " if that's what you're after." - ) - if "x" in mode: - raise ValueError("Use the `overwrite`-parameter instead.") - if "w" not in mode: - raise ValueError("Atomic writes only make sense with `w`-mode.") - - # Atomic writes are more complicated. They work by opening a file - # as a proxy in the same folder and then using the fdopen - # functionality to wrap it in a Python file. Then we wrap it in an - # atomic file that moves the file over on close. - import errno - import random - - try: - perm: t.Optional[int] = os.stat(filename).st_mode - except OSError: - perm = None - - flags = os.O_RDWR | os.O_CREAT | os.O_EXCL - - if binary: - flags |= getattr(os, "O_BINARY", 0) - - while True: - tmp_filename = os.path.join( - os.path.dirname(filename), - f".__atomic-write{random.randrange(1 << 32):08x}", - ) - try: - fd = os.open(tmp_filename, flags, 0o666 if perm is None else perm) - break - except OSError as e: - if e.errno == errno.EEXIST or ( - os.name == "nt" - and e.errno == errno.EACCES - and os.path.isdir(e.filename) - and os.access(e.filename, os.W_OK) - ): - continue - raise - - if perm is not None: - os.chmod(tmp_filename, perm) # in case perm includes bits in umask - - f = _wrap_io_open(fd, mode, encoding, errors) - af = _AtomicFile(f, tmp_filename, os.path.realpath(filename)) - return t.cast(t.IO[t.Any], af), True - - -class _AtomicFile: - def __init__(self, f: t.IO[t.Any], tmp_filename: str, real_filename: str) -> None: - self._f = f - self._tmp_filename = tmp_filename - self._real_filename = real_filename - self.closed = False - - @property - def name(self) -> str: - return self._real_filename - - def close(self, delete: bool = False) -> None: - if self.closed: - return - self._f.close() - os.replace(self._tmp_filename, self._real_filename) - self.closed = True - - def __getattr__(self, name: str) -> t.Any: - return getattr(self._f, name) - - def __enter__(self) -> "_AtomicFile": - return self - - def __exit__(self, exc_type: t.Optional[t.Type[BaseException]], *_: t.Any) -> None: - self.close(delete=exc_type is not None) - - def __repr__(self) -> str: - return repr(self._f) - - -def strip_ansi(value: str) -> str: - return _ansi_re.sub("", value) - - -def _is_jupyter_kernel_output(stream: t.IO[t.Any]) -> bool: - while isinstance(stream, (_FixupStream, _NonClosingTextIOWrapper)): - stream = stream._stream - - return stream.__class__.__module__.startswith("ipykernel.") - - -def should_strip_ansi( - stream: t.Optional[t.IO[t.Any]] = None, color: t.Optional[bool] = None -) -> bool: - if color is None: - if stream is None: - stream = sys.stdin - return not isatty(stream) and not _is_jupyter_kernel_output(stream) - return not color - - -# On Windows, wrap the output streams with colorama to support ANSI -# color codes. 
-# NOTE: double check is needed so mypy does not analyze this on Linux -if sys.platform.startswith("win") and WIN: - from ._winconsole import _get_windows_console_stream - - def _get_argv_encoding() -> str: - import locale - - return locale.getpreferredencoding() - - _ansi_stream_wrappers: t.MutableMapping[t.TextIO, t.TextIO] = WeakKeyDictionary() - - def auto_wrap_for_ansi( - stream: t.TextIO, color: t.Optional[bool] = None - ) -> t.TextIO: - """Support ANSI color and style codes on Windows by wrapping a - stream with colorama. - """ - try: - cached = _ansi_stream_wrappers.get(stream) - except Exception: - cached = None - - if cached is not None: - return cached - - import colorama - - strip = should_strip_ansi(stream, color) - ansi_wrapper = colorama.AnsiToWin32(stream, strip=strip) - rv = t.cast(t.TextIO, ansi_wrapper.stream) - _write = rv.write - - def _safe_write(s): - try: - return _write(s) - except BaseException: - ansi_wrapper.reset_all() - raise - - rv.write = _safe_write - - try: - _ansi_stream_wrappers[stream] = rv - except Exception: - pass - - return rv - -else: - - def _get_argv_encoding() -> str: - return getattr(sys.stdin, "encoding", None) or sys.getfilesystemencoding() - - def _get_windows_console_stream( - f: t.TextIO, encoding: t.Optional[str], errors: t.Optional[str] - ) -> t.Optional[t.TextIO]: - return None - - -def term_len(x: str) -> int: - return len(strip_ansi(x)) - - -def isatty(stream: t.IO[t.Any]) -> bool: - try: - return stream.isatty() - except Exception: - return False - - -def _make_cached_stream_func( - src_func: t.Callable[[], t.Optional[t.TextIO]], - wrapper_func: t.Callable[[], t.TextIO], -) -> t.Callable[[], t.Optional[t.TextIO]]: - cache: t.MutableMapping[t.TextIO, t.TextIO] = WeakKeyDictionary() - - def func() -> t.Optional[t.TextIO]: - stream = src_func() - - if stream is None: - return None - - try: - rv = cache.get(stream) - except Exception: - rv = None - if rv is not None: - return rv - rv = wrapper_func() - try: - cache[stream] = rv - except Exception: - pass - return rv - - return func - - -_default_text_stdin = _make_cached_stream_func(lambda: sys.stdin, get_text_stdin) -_default_text_stdout = _make_cached_stream_func(lambda: sys.stdout, get_text_stdout) -_default_text_stderr = _make_cached_stream_func(lambda: sys.stderr, get_text_stderr) - - -binary_streams: t.Mapping[str, t.Callable[[], t.BinaryIO]] = { - "stdin": get_binary_stdin, - "stdout": get_binary_stdout, - "stderr": get_binary_stderr, -} - -text_streams: t.Mapping[ - str, t.Callable[[t.Optional[str], t.Optional[str]], t.TextIO] -] = { - "stdin": get_text_stdin, - "stdout": get_text_stdout, - "stderr": get_text_stderr, -} diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/docx/opc/part.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/docx/opc/part.py deleted file mode 100644 index 928d3c1837d78def46227f09a140afe7dcdd645e..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/docx/opc/part.py +++ /dev/null @@ -1,241 +0,0 @@ -# encoding: utf-8 - -""" -Open Packaging Convention (OPC) objects related to package parts. 
-""" - -from __future__ import ( - absolute_import, division, print_function, unicode_literals -) - -from .compat import cls_method_fn -from .oxml import serialize_part_xml -from ..oxml import parse_xml -from .packuri import PackURI -from .rel import Relationships -from .shared import lazyproperty - - -class Part(object): - """ - Base class for package parts. Provides common properties and methods, but - intended to be subclassed in client code to implement specific part - behaviors. - """ - def __init__(self, partname, content_type, blob=None, package=None): - super(Part, self).__init__() - self._partname = partname - self._content_type = content_type - self._blob = blob - self._package = package - - def after_unmarshal(self): - """ - Entry point for post-unmarshaling processing, for example to parse - the part XML. May be overridden by subclasses without forwarding call - to super. - """ - # don't place any code here, just catch call if not overridden by - # subclass - pass - - def before_marshal(self): - """ - Entry point for pre-serialization processing, for example to finalize - part naming if necessary. May be overridden by subclasses without - forwarding call to super. - """ - # don't place any code here, just catch call if not overridden by - # subclass - pass - - @property - def blob(self): - """ - Contents of this package part as a sequence of bytes. May be text or - binary. Intended to be overridden by subclasses. Default behavior is - to return load blob. - """ - return self._blob - - @property - def content_type(self): - """ - Content type of this part. - """ - return self._content_type - - def drop_rel(self, rId): - """ - Remove the relationship identified by *rId* if its reference count - is less than 2. Relationships with a reference count of 0 are - implicit relationships. - """ - if self._rel_ref_count(rId) < 2: - del self.rels[rId] - - @classmethod - def load(cls, partname, content_type, blob, package): - return cls(partname, content_type, blob, package) - - def load_rel(self, reltype, target, rId, is_external=False): - """ - Return newly added |_Relationship| instance of *reltype* between this - part and *target* with key *rId*. Target mode is set to - ``RTM.EXTERNAL`` if *is_external* is |True|. Intended for use during - load from a serialized package, where the rId is well-known. Other - methods exist for adding a new relationship to a part when - manipulating a part. - """ - return self.rels.add_relationship(reltype, target, rId, is_external) - - @property - def package(self): - """ - |OpcPackage| instance this part belongs to. - """ - return self._package - - @property - def partname(self): - """ - |PackURI| instance holding partname of this part, e.g. - '/ppt/slides/slide1.xml' - """ - return self._partname - - @partname.setter - def partname(self, partname): - if not isinstance(partname, PackURI): - tmpl = "partname must be instance of PackURI, got '%s'" - raise TypeError(tmpl % type(partname).__name__) - self._partname = partname - - def part_related_by(self, reltype): - """ - Return part to which this part has a relationship of *reltype*. - Raises |KeyError| if no such relationship is found and |ValueError| - if more than one such relationship is found. Provides ability to - resolve implicitly related part, such as Slide -> SlideLayout. 
- """ - return self.rels.part_with_reltype(reltype) - - def relate_to(self, target, reltype, is_external=False): - """ - Return rId key of relationship of *reltype* to *target*, from an - existing relationship if there is one, otherwise a newly created one. - """ - if is_external: - return self.rels.get_or_add_ext_rel(reltype, target) - else: - rel = self.rels.get_or_add(reltype, target) - return rel.rId - - @property - def related_parts(self): - """ - Dictionary mapping related parts by rId, so child objects can resolve - explicit relationships present in the part XML, e.g. sldIdLst to a - specific |Slide| instance. - """ - return self.rels.related_parts - - @lazyproperty - def rels(self): - """ - |Relationships| instance holding the relationships for this part. - """ - return Relationships(self._partname.baseURI) - - def target_ref(self, rId): - """ - Return URL contained in target ref of relationship identified by - *rId*. - """ - rel = self.rels[rId] - return rel.target_ref - - def _rel_ref_count(self, rId): - """ - Return the count of references in this part's XML to the relationship - identified by *rId*. - """ - rIds = self._element.xpath('//@r:id') - return len([_rId for _rId in rIds if _rId == rId]) - - -class PartFactory(object): - """ - Provides a way for client code to specify a subclass of |Part| to be - constructed by |Unmarshaller| based on its content type and/or a custom - callable. Setting ``PartFactory.part_class_selector`` to a callable - object will cause that object to be called with the parameters - ``content_type, reltype``, once for each part in the package. If the - callable returns an object, it is used as the class for that part. If it - returns |None|, part class selection falls back to the content type map - defined in ``PartFactory.part_type_for``. If no class is returned from - either of these, the class contained in ``PartFactory.default_part_type`` - is used to construct the part, which is by default ``opc.package.Part``. - """ - part_class_selector = None - part_type_for = {} - default_part_type = Part - - def __new__(cls, partname, content_type, reltype, blob, package): - PartClass = None - if cls.part_class_selector is not None: - part_class_selector = cls_method_fn(cls, 'part_class_selector') - PartClass = part_class_selector(content_type, reltype) - if PartClass is None: - PartClass = cls._part_cls_for(content_type) - return PartClass.load(partname, content_type, blob, package) - - @classmethod - def _part_cls_for(cls, content_type): - """ - Return the custom part class registered for *content_type*, or the - default part class if no custom class is registered for - *content_type*. - """ - if content_type in cls.part_type_for: - return cls.part_type_for[content_type] - return cls.default_part_type - - -class XmlPart(Part): - """ - Base class for package parts containing an XML payload, which is most of - them. Provides additional methods to the |Part| base class that take care - of parsing and reserializing the XML payload and managing relationships - to other parts. - """ - def __init__(self, partname, content_type, element, package): - super(XmlPart, self).__init__( - partname, content_type, package=package - ) - self._element = element - - @property - def blob(self): - return serialize_part_xml(self._element) - - @property - def element(self): - """ - The root XML element of this XML part. 
- """ - return self._element - - @classmethod - def load(cls, partname, content_type, blob, package): - element = parse_xml(blob) - return cls(partname, content_type, element, package) - - @property - def part(self): - """ - Part of the parent protocol, "children" of the document will not know - the part that contains them so must ask their parent object. That - chain of delegation ends here for child objects. - """ - return self diff --git a/spaces/cihyFjudo/fairness-paper-search/Download sub A Better Tomorrow III and Watch the Epic Finale of the Trilogy.md b/spaces/cihyFjudo/fairness-paper-search/Download sub A Better Tomorrow III and Watch the Epic Finale of the Trilogy.md deleted file mode 100644 index 2f41d7c3bab418bbec0de40f1c1d23f7515184b1..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Download sub A Better Tomorrow III and Watch the Epic Finale of the Trilogy.md +++ /dev/null @@ -1,22 +0,0 @@ -<br /> -<p>Inclusive and sustainable cities provide better access to education by the urban poor, women and girls. Inclusive and equitable education provides adequate skills for decent jobs and improved living conditions.</p> -<h2>download sub A Better Tomorrow III</h2><br /><p><b><b>DOWNLOAD</b> –––––>>> <a href="https://tinurli.com/2uwhT3">https://tinurli.com/2uwhT3</a></b></p><br /><br /> -<p>The theme of WUF11, Transforming our Cities for a Better Urban Future, will provide greater insights and clarity on the future of cities based on existing trends, challenges and opportunities, as well as suggest ways cities can be better prepared to address future pandemics and a wide range of other shocks.</p> -<p>Social and emotional learning (SEL) is the process through which all young people and adults acquire and apply the knowledge, skills, and attitudes to develop healthy identities, manage emotions and achieve personal and collective goals, feel and show empathy for others, establish and maintain supportive relationships, and make responsible and caring decisions. Students in SEL programs are more likely to attend school and receive better grades, and are less likely to have conduct problems. Successful infusion of SEL can result in positive behaviors, increased academic success, and caring communities.</p> -<p>The interlinkages and integrated nature of the Sustainable Development Goals are of crucial importance in ensuring that the purpose of the new Agenda is realised. If we realize our ambitions across the full extent of the Agenda, the lives of all will be profoundly improved and our world will be transformed for the better.</p> -<p>50. Today we are also taking a decision of great historic significance. We resolve to build a better future for all people, including the millions who have been denied the chance to lead decent, dignified and rewarding lives and to achieve their full human potential. We can be the first generation to succeed in ending poverty; just as we may be the last to have a chance of saving the planet. The world will be a better place in 2030 if we succeed in our objectives.</p> -<p></p> -<p>57. We recognize that baseline data for several of the targets remain unavailable, and we call for increased support for strengthening data collection and capacity building in Member States, to develop national and global baselines where they do not yet exist. 
We commit to addressing this gap in data collection so as to better inform the measurement of progress, in particular for those targets below which do not have clear numerical targets.</p> -<p>65. We recognize that middle-income countries still face significant challenges to achieve sustainable development. In order to ensure that achievements made to date are sustained, efforts to address ongoing challenges should be strengthened through the exchange of experiences, improved coordination, and better and focused support of the United Nations Development System, the international financial institutions, regional organizations and other stakeholders.</p> -<p>The New Urban Agenda represents a shared vision for a better and more sustainable future. If well-planned and well-managed, urbanization can be a powerful tool for sustainable development for both developing and developed countries.</p> -<p>Avantor®, a Fortune 500 company, is a leading global provider of mission-critical products and services to customers in the biopharma, healthcare, education & government, and advanced technologies & applied materials industries. Our portfolio is used in virtually every stage of the most important research, development and production activities in the industries we serve. Our global footprint enables us to serve more than 300,000 customer locations and gives us extensive access to research laboratories and scientists in more than 180 countries. We set science in motion to create a better world.</p> -<p>Users who require only a single level of HydroBASINS can use the following trees. To download a file, please navigate to the desired continent and level of data, and then click to download that file. Files are also available which contain layers for levels 1-6, as well as for level 0 (available for standard data only). Note that the files containing all levels (1-12) are the same files as available in the table above.</p> -<p>Finacle is an industry leader in digital banking solutions. We partner with emerging and established financial institutions to inspire better banking. Our cloud-native solution suite and SaaS services help banks to engage, innovate, operate, and transform better.</p> -<p>Finacle help banks engage better with their customers, employees, and partners. We do this by helping banks design and deliver truly personalized products and services. Built on a unique engagement hub, our suite helps banks onboard, sell, service, and converse better with customers. In fact, banks running on Finacle have realized an average 19% improvement in their NPS scores.</p> -<p>Finacle helps banks to transform better so that they can stay relevant to evolving market dynamics. With a componentized digital suite and flexible deployment options, we empower banks to mitigate risk and transform and upgrade in a phased manner. Our DevOps toolchain helps banks to build, test, deploy and monitor new capabilities with speed to stay ahead of the competition.</p> -<p>If you need to make changes to the autogenerated transcript / caption, you can download the caption file and edit it in a text editor of choice before uploading it back to Stream, or you can edit the transcript directly in Microsoft Stream (Classic) in the transcript window.</p> -<p>Microsoft Stream (Classic) doesn't have a built in way to do this, but you can download the transcript (as listed above) and use a simple web utility to extract the transcript text from your downloaded VTT file. 
This web utility allows you to pick VTT files from your computer and get a copy of the text portion of the VTT file containing just the transcript.</p> -<p>Visualizations are colors, shapes, and patterns that move to the music in Windows Media Player Now Playing mode. The Player comes with a number of visualizations, and you can download more on this page.</p> -<p>** Click here to download the large Image **<br /><br />United States Indo-Pacific Command (USINDOPACOM) is one of six geographic combatant commands defined by the Department of Defense's Unified Command Plan (UCP). As a geographic combatant command, USINDOPACOM is in charge of using and integrating United States Army, Navy, Air Force and Marine Corps forces within the USINDOPACOM area of responsibility (AOR) to achieve U.S. national security objectives while protecting national interests. The USINDOPACOM AOR covers more of the globe of any of the other geographic combatant commands and shares borders with all of the other five geographic combatant commands. The commander of US Indo-Pacific Command reports to the President of the United States through the Secretary of Defense and is supported by multiple component and sub-unified commands including: U.S. Forces Korea, US Forces Japan, U.S. Special Operations Command Pacific, U.S. Pacific Fleet, U.S. Marine Forces Pacific, U.S. Pacific Air Forces and U.S. Army Pacific.</p> aaccfb2cb3<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/cihyFjudo/fairness-paper-search/Enjoy Fast and High-Quality Video Conversion with KeepVid Video Converter 1.0.0.13 Full MAC Crack OS X MacOSX.md b/spaces/cihyFjudo/fairness-paper-search/Enjoy Fast and High-Quality Video Conversion with KeepVid Video Converter 1.0.0.13 Full MAC Crack OS X MacOSX.md deleted file mode 100644 index d35079fc3fc395625e226a26ee6a34071d9315bb..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Enjoy Fast and High-Quality Video Conversion with KeepVid Video Converter 1.0.0.13 Full MAC Crack OS X MacOSX.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>KeepVid Video Converter 1.0.0.13 Full MAC Crack OS X MacOSX</h2><br /><p><b><b>DOWNLOAD</b> · <a href="https://tinurli.com/2uwipN">https://tinurli.com/2uwipN</a></b></p><br /><br /> - - aaccfb2cb3<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/cihyFjudo/fairness-paper-search/Maine Pyaar Kyun Kiya Movie In Hindi 720p Download Enjoy The Hit Songs And Funny Dialogues Of This Bollywood Film.md b/spaces/cihyFjudo/fairness-paper-search/Maine Pyaar Kyun Kiya Movie In Hindi 720p Download Enjoy The Hit Songs And Funny Dialogues Of This Bollywood Film.md deleted file mode 100644 index c6ceb8efc1e25ebc1b4e7acd6a6d3b85633e4329..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Maine Pyaar Kyun Kiya Movie In Hindi 720p Download Enjoy The Hit Songs And Funny Dialogues Of This Bollywood Film.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>Maine Pyaar Kyun Kiya Movie In Hindi Download 720p</h2><br /><p><b><b>Download Zip</b> ✵✵✵ <a href="https://tinurli.com/2uwjKs">https://tinurli.com/2uwjKs</a></b></p><br /><br /> -<br /> - aaccfb2cb3<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/cihyFjudo/fairness-paper-search/Orchid Fever A Horticultural Tale of Love Lust and Lunacy - Free Download in Multiple Formats.md b/spaces/cihyFjudo/fairness-paper-search/Orchid Fever A Horticultural Tale of Love Lust and Lunacy - Free Download in Multiple Formats.md deleted file mode 100644 index 
753f6b439ff9f81562479055b3305c6207a3e79b..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Orchid Fever A Horticultural Tale of Love Lust and Lunacy - Free Download in Multiple Formats.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>Satvapariksha Marathi Movie Song Downloadinstmank genie coordonnee ham</h2><br /><p><b><b>Download</b> >>>>> <a href="https://tinurli.com/2uwjTf">https://tinurli.com/2uwjTf</a></b></p><br /><br /> - - aaccfb2cb3<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/cihyFjudo/fairness-paper-search/Origin Key SimCity 5 A Guide to the Latest Installment of the SimCity Series.md b/spaces/cihyFjudo/fairness-paper-search/Origin Key SimCity 5 A Guide to the Latest Installment of the SimCity Series.md deleted file mode 100644 index 537409c14591d1f6cbe6df1be7394f1147fe92c2..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Origin Key SimCity 5 A Guide to the Latest Installment of the SimCity Series.md +++ /dev/null @@ -1,12 +0,0 @@ - -<p>simcity is a really nice game. really hope that the online service works better now. There were a lot of problems with building simultanouisly on a Great Work. My status report said it was finished, my friends report was that it had to start construction! If this is fixed, im definitely gonna play again.<br />Love it!</p> -<p>"We did not focus on the 'single city in isolation' that we have delivered in past SimCities," she said. "We recognise that there are fans - people who love the original SimCity - who want that. But we're also hearing from thousands of people who are playing across regions, trading, communicating and loving the Always-Connected functionality. The SimCity we delivered captures the magic of its heritage but catches up with ever-improving technology."</p> -<h2>origin key simcity 5</h2><br /><p><b><b>Download Zip</b> 🔗 <a href="https://tinurli.com/2uwjs4">https://tinurli.com/2uwjs4</a></b></p><br /><br /> -<p><i><b>SimCity</b></i> is an open-ended city-building video game series originally designed by Will Wright. The first game in the series, <i>SimCity</i>, was published by Maxis in 1989 and were followed by several sequels and many other spin-off "<i>Sim</i>" titles, including 2000's <i>The Sims</i>, which itself became a best-selling computer game and franchise.[1] Maxis developed the series independently until 1997, and continued under the ownership of Electronic Arts until 2003. EA commissioned various spinoffs from other companies during the 2000s, focusing on console and mobile releases. A 2013 EA-Maxis reboot was subject to what has been described as "one of the most disastrous launches in history", which may have triggered the 2015 shutdown of Maxis Emeryville and the end of the franchise.[2][3]</p> -<p>The primary source of income is taxation, though some income can be generated by legalizing gambling or placing certain "special" buildings such as military bases or prisons. The player may make deals with neighbouring cities to sell or buy services, as long as a connection is made to the neighbour for that service, such as electricity cables. The player may have to deal with disasters, such as fires and tornadoes, or fictional crises such as monster attacks. 
<i>SimCity</i> titles are predominantly single-player games, with a few exceptions, including the "Network Edition" of <i>SimCity 2000</i>, the Unix port of the original <i>SimCity</i>, and <i>SimCity</i> (2013).[4] <i>SimCity 4</i> provided a limited form of multiplayer gaming with the ability to share regional maps and cities with other players, allowing players to collaborate, but not to interact in real-time gameplay.[5][6][7]</p> -<p>Development of the original <i>SimCity</i> began in 1985 under game designer Will Wright, and the game was published in 1989.[8] Wright was inspired by a map creation feature of the game <i>Raid on Bungeling Bay</i> that led him to discover that he enjoyed creating maps more than playing the actual game.[9] While developing <i>SimCity</i>, Wright cultivated a love of the intricacies and theories of urban planning[10] and acknowledges the influence of Jay Wright Forrester's book <i>Urban Dynamics</i>.[11][12] In addition, Wright was inspired by reading "The Seventh Sally", a short story by Stanisław Lem from <i>The Cyberiad</i>, published in the collection <i>The Mind's I</i>, in which an engineer encounters a deposed tyrant, and creates a miniature city with artificial citizens for the tyrant to oppress.[13]</p> -<p><i>SimCity</i> was released in 1990 on the ZX Spectrum 48K and 128K by Infogrames. The SNES port was very similar to the original edition but had some unique features, including Reward buildings, a Mario statue and possible attacks by a giant Bowser.[<i>citation needed</i>]</p> -<p>The unexpected and enduring success of the original <i>SimCity</i>, combined with other "<i>Sim</i>" titles' relative lack of success at the time, motivated the development of a sequel. <i>SimCity 2000</i> released in 1993[16] with an isometric view instead of overhead. Underground layers were introduced for water pipes and subways, along with many new buildings, more elaborate financial controls and many other improvements.[17]</p> -<p>On January 10, 2008, the source code of the original game was released under the free software GPL 3 license.[28] The release of the source code was related to the donation of <i>SimCity</i> software to the One Laptop Per Child laptop, as one of the principles of the OLPC laptop is the use of free and open source software. The open source version was called <i>Micropolis</i>, since EA retained the trademark <i>SimCity</i>.</p> aaccfb2cb3<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/anyio/lowlevel.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/anyio/lowlevel.py deleted file mode 100644 index 0e908c65474402fa89fe933d65205378c543e3bf..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/anyio/lowlevel.py +++ /dev/null @@ -1,174 +0,0 @@ -from __future__ import annotations - -import enum -import sys -from dataclasses import dataclass -from typing import Any, Generic, TypeVar, overload -from weakref import WeakKeyDictionary - -from ._core._eventloop import get_asynclib - -if sys.version_info >= (3, 8): - from typing import Literal -else: - from typing_extensions import Literal - -T = TypeVar("T") -D = TypeVar("D") - - -async def checkpoint() -> None: - """ - Check for cancellation and allow the scheduler to switch to another task. - - Equivalent to (but more efficient than):: - - await checkpoint_if_cancelled() - await cancel_shielded_checkpoint() - - - .. 
versionadded:: 3.0 - - """ - await get_asynclib().checkpoint() - - -async def checkpoint_if_cancelled() -> None: - """ - Enter a checkpoint if the enclosing cancel scope has been cancelled. - - This does not allow the scheduler to switch to a different task. - - .. versionadded:: 3.0 - - """ - await get_asynclib().checkpoint_if_cancelled() - - -async def cancel_shielded_checkpoint() -> None: - """ - Allow the scheduler to switch to another task but without checking for cancellation. - - Equivalent to (but potentially more efficient than):: - - with CancelScope(shield=True): - await checkpoint() - - - .. versionadded:: 3.0 - - """ - await get_asynclib().cancel_shielded_checkpoint() - - -def current_token() -> object: - """Return a backend specific token object that can be used to get back to the event loop.""" - return get_asynclib().current_token() - - -_run_vars: WeakKeyDictionary[Any, dict[str, Any]] = WeakKeyDictionary() -_token_wrappers: dict[Any, _TokenWrapper] = {} - - -@dataclass(frozen=True) -class _TokenWrapper: - __slots__ = "_token", "__weakref__" - _token: object - - -class _NoValueSet(enum.Enum): - NO_VALUE_SET = enum.auto() - - -class RunvarToken(Generic[T]): - __slots__ = "_var", "_value", "_redeemed" - - def __init__(self, var: RunVar[T], value: T | Literal[_NoValueSet.NO_VALUE_SET]): - self._var = var - self._value: T | Literal[_NoValueSet.NO_VALUE_SET] = value - self._redeemed = False - - -class RunVar(Generic[T]): - """ - Like a :class:`~contextvars.ContextVar`, except scoped to the running event loop. - """ - - __slots__ = "_name", "_default" - - NO_VALUE_SET: Literal[_NoValueSet.NO_VALUE_SET] = _NoValueSet.NO_VALUE_SET - - _token_wrappers: set[_TokenWrapper] = set() - - def __init__( - self, - name: str, - default: T | Literal[_NoValueSet.NO_VALUE_SET] = NO_VALUE_SET, - ): - self._name = name - self._default = default - - @property - def _current_vars(self) -> dict[str, T]: - token = current_token() - while True: - try: - return _run_vars[token] - except TypeError: - # Happens when token isn't weak referable (TrioToken). - # This workaround does mean that some memory will leak on Trio until the problem - # is fixed on their end. - token = _TokenWrapper(token) - self._token_wrappers.add(token) - except KeyError: - run_vars = _run_vars[token] = {} - return run_vars - - @overload - def get(self, default: D) -> T | D: - ... - - @overload - def get(self) -> T: - ... 
- - def get( - self, default: D | Literal[_NoValueSet.NO_VALUE_SET] = NO_VALUE_SET - ) -> T | D: - try: - return self._current_vars[self._name] - except KeyError: - if default is not RunVar.NO_VALUE_SET: - return default - elif self._default is not RunVar.NO_VALUE_SET: - return self._default - - raise LookupError( - f'Run variable "{self._name}" has no value and no default set' - ) - - def set(self, value: T) -> RunvarToken[T]: - current_vars = self._current_vars - token = RunvarToken(self, current_vars.get(self._name, RunVar.NO_VALUE_SET)) - current_vars[self._name] = value - return token - - def reset(self, token: RunvarToken[T]) -> None: - if token._var is not self: - raise ValueError("This token does not belong to this RunVar") - - if token._redeemed: - raise ValueError("This token has already been used") - - if token._value is _NoValueSet.NO_VALUE_SET: - try: - del self._current_vars[self._name] - except KeyError: - pass - else: - self._current_vars[self._name] = token._value - - token._redeemed = True - - def __repr__(self) -> str: - return f"<RunVar name={self._name!r}>" diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/certifi/core.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/certifi/core.py deleted file mode 100644 index de028981b97e1fcc8ef4ab2c817cc8731b9c8738..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/certifi/core.py +++ /dev/null @@ -1,108 +0,0 @@ -""" -certifi.py -~~~~~~~~~~ - -This module returns the installation location of cacert.pem or its contents. -""" -import sys - - -if sys.version_info >= (3, 11): - - from importlib.resources import as_file, files - - _CACERT_CTX = None - _CACERT_PATH = None - - def where() -> str: - # This is slightly terrible, but we want to delay extracting the file - # in cases where we're inside of a zipimport situation until someone - # actually calls where(), but we don't want to re-extract the file - # on every call of where(), so we'll do it once then store it in a - # global variable. - global _CACERT_CTX - global _CACERT_PATH - if _CACERT_PATH is None: - # This is slightly janky, the importlib.resources API wants you to - # manage the cleanup of this file, so it doesn't actually return a - # path, it returns a context manager that will give you the path - # when you enter it and will do any cleanup when you leave it. In - # the common case of not needing a temporary file, it will just - # return the file system location and the __exit__() is a no-op. - # - # We also have to hold onto the actual context manager, because - # it will do the cleanup whenever it gets garbage collected, so - # we will also store that at the global level as well. - _CACERT_CTX = as_file(files("certifi").joinpath("cacert.pem")) - _CACERT_PATH = str(_CACERT_CTX.__enter__()) - - return _CACERT_PATH - - def contents() -> str: - return files("certifi").joinpath("cacert.pem").read_text(encoding="ascii") - -elif sys.version_info >= (3, 7): - - from importlib.resources import path as get_path, read_text - - _CACERT_CTX = None - _CACERT_PATH = None - - def where() -> str: - # This is slightly terrible, but we want to delay extracting the - # file in cases where we're inside of a zipimport situation until - # someone actually calls where(), but we don't want to re-extract - # the file on every call of where(), so we'll do it once then store - # it in a global variable. 
- global _CACERT_CTX - global _CACERT_PATH - if _CACERT_PATH is None: - # This is slightly janky, the importlib.resources API wants you - # to manage the cleanup of this file, so it doesn't actually - # return a path, it returns a context manager that will give - # you the path when you enter it and will do any cleanup when - # you leave it. In the common case of not needing a temporary - # file, it will just return the file system location and the - # __exit__() is a no-op. - # - # We also have to hold onto the actual context manager, because - # it will do the cleanup whenever it gets garbage collected, so - # we will also store that at the global level as well. - _CACERT_CTX = get_path("certifi", "cacert.pem") - _CACERT_PATH = str(_CACERT_CTX.__enter__()) - - return _CACERT_PATH - - def contents() -> str: - return read_text("certifi", "cacert.pem", encoding="ascii") - -else: - import os - import types - from typing import Union - - Package = Union[types.ModuleType, str] - Resource = Union[str, "os.PathLike"] - - # This fallback will work for Python versions prior to 3.7 that lack the - # importlib.resources module but relies on the existing `where` function - # so won't address issues with environments like PyOxidizer that don't set - # __file__ on modules. - def read_text( - package: Package, - resource: Resource, - encoding: str = 'utf-8', - errors: str = 'strict' - ) -> str: - with open(where(), encoding=encoding) as data: - return data.read() - - # If we don't have importlib.resources, then we will just do the old logic - # of assuming we're on the filesystem and munge the path directly. - def where() -> str: - f = os.path.dirname(__file__) - - return os.path.join(f, "cacert.pem") - - def contents() -> str: - return read_text("certifi", "cacert.pem", encoding="ascii") diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/otlLib/optimize/gpos.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/otlLib/optimize/gpos.py deleted file mode 100644 index 0acd9ed04c141c532cf7fafda220b3a898106415..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/otlLib/optimize/gpos.py +++ /dev/null @@ -1,452 +0,0 @@ -import logging -import os -from collections import defaultdict, namedtuple -from functools import reduce -from itertools import chain -from math import log2 -from typing import DefaultDict, Dict, Iterable, List, Sequence, Tuple - -from fontTools.config import OPTIONS -from fontTools.misc.intTools import bit_count, bit_indices -from fontTools.ttLib import TTFont -from fontTools.ttLib.tables import otBase, otTables - -log = logging.getLogger(__name__) - -COMPRESSION_LEVEL = OPTIONS[f"{__name__}:COMPRESSION_LEVEL"] - -# Kept because ufo2ft depends on it, to be removed once ufo2ft uses the config instead -# https://github.com/fonttools/fonttools/issues/2592 -GPOS_COMPACT_MODE_ENV_KEY = "FONTTOOLS_GPOS_COMPACT_MODE" -GPOS_COMPACT_MODE_DEFAULT = str(COMPRESSION_LEVEL.default) - - -def _compression_level_from_env() -> int: - env_level = GPOS_COMPACT_MODE_DEFAULT - if GPOS_COMPACT_MODE_ENV_KEY in os.environ: - import warnings - - warnings.warn( - f"'{GPOS_COMPACT_MODE_ENV_KEY}' environment variable is deprecated. 
" - "Please set the 'fontTools.otlLib.optimize.gpos:COMPRESSION_LEVEL' option " - "in TTFont.cfg.", - DeprecationWarning, - ) - - env_level = os.environ[GPOS_COMPACT_MODE_ENV_KEY] - if len(env_level) == 1 and env_level in "0123456789": - return int(env_level) - raise ValueError(f"Bad {GPOS_COMPACT_MODE_ENV_KEY}={env_level}") - - -def compact(font: TTFont, level: int) -> TTFont: - # Ideal plan: - # 1. Find lookups of Lookup Type 2: Pair Adjustment Positioning Subtable - # https://docs.microsoft.com/en-us/typography/opentype/spec/gpos#lookup-type-2-pair-adjustment-positioning-subtable - # 2. Extract glyph-glyph kerning and class-kerning from all present subtables - # 3. Regroup into different subtable arrangements - # 4. Put back into the lookup - # - # Actual implementation: - # 2. Only class kerning is optimized currently - # 3. If the input kerning is already in several subtables, the subtables - # are not grouped together first; instead each subtable is treated - # independently, so currently this step is: - # Split existing subtables into more smaller subtables - gpos = font["GPOS"] - for lookup in gpos.table.LookupList.Lookup: - if lookup.LookupType == 2: - compact_lookup(font, level, lookup) - elif lookup.LookupType == 9 and lookup.SubTable[0].ExtensionLookupType == 2: - compact_ext_lookup(font, level, lookup) - return font - - -def compact_lookup(font: TTFont, level: int, lookup: otTables.Lookup) -> None: - new_subtables = compact_pair_pos(font, level, lookup.SubTable) - lookup.SubTable = new_subtables - lookup.SubTableCount = len(new_subtables) - - -def compact_ext_lookup(font: TTFont, level: int, lookup: otTables.Lookup) -> None: - new_subtables = compact_pair_pos( - font, level, [ext_subtable.ExtSubTable for ext_subtable in lookup.SubTable] - ) - new_ext_subtables = [] - for subtable in new_subtables: - ext_subtable = otTables.ExtensionPos() - ext_subtable.Format = 1 - ext_subtable.ExtSubTable = subtable - new_ext_subtables.append(ext_subtable) - lookup.SubTable = new_ext_subtables - lookup.SubTableCount = len(new_ext_subtables) - - -def compact_pair_pos( - font: TTFont, level: int, subtables: Sequence[otTables.PairPos] -) -> Sequence[otTables.PairPos]: - new_subtables = [] - for subtable in subtables: - if subtable.Format == 1: - # Not doing anything to Format 1 (yet?) 
- new_subtables.append(subtable) - elif subtable.Format == 2: - new_subtables.extend(compact_class_pairs(font, level, subtable)) - return new_subtables - - -def compact_class_pairs( - font: TTFont, level: int, subtable: otTables.PairPos -) -> List[otTables.PairPos]: - from fontTools.otlLib.builder import buildPairPosClassesSubtable - - subtables = [] - classes1: DefaultDict[int, List[str]] = defaultdict(list) - for g in subtable.Coverage.glyphs: - classes1[subtable.ClassDef1.classDefs.get(g, 0)].append(g) - classes2: DefaultDict[int, List[str]] = defaultdict(list) - for g, i in subtable.ClassDef2.classDefs.items(): - classes2[i].append(g) - all_pairs = {} - for i, class1 in enumerate(subtable.Class1Record): - for j, class2 in enumerate(class1.Class2Record): - if is_really_zero(class2): - continue - all_pairs[(tuple(sorted(classes1[i])), tuple(sorted(classes2[j])))] = ( - getattr(class2, "Value1", None), - getattr(class2, "Value2", None), - ) - grouped_pairs = cluster_pairs_by_class2_coverage_custom_cost(font, all_pairs, level) - for pairs in grouped_pairs: - subtables.append(buildPairPosClassesSubtable(pairs, font.getReverseGlyphMap())) - return subtables - - -def is_really_zero(class2: otTables.Class2Record) -> bool: - v1 = getattr(class2, "Value1", None) - v2 = getattr(class2, "Value2", None) - return (v1 is None or v1.getEffectiveFormat() == 0) and ( - v2 is None or v2.getEffectiveFormat() == 0 - ) - - -Pairs = Dict[ - Tuple[Tuple[str, ...], Tuple[str, ...]], - Tuple[otBase.ValueRecord, otBase.ValueRecord], -] - -# Adapted from https://github.com/fonttools/fonttools/blob/f64f0b42f2d1163b2d85194e0979def539f5dca3/Lib/fontTools/ttLib/tables/otTables.py#L935-L958 -def _getClassRanges(glyphIDs: Iterable[int]): - glyphIDs = sorted(glyphIDs) - last = glyphIDs[0] - ranges = [[last]] - for glyphID in glyphIDs[1:]: - if glyphID != last + 1: - ranges[-1].append(last) - ranges.append([glyphID]) - last = glyphID - ranges[-1].append(last) - return ranges, glyphIDs[0], glyphIDs[-1] - - -# Adapted from https://github.com/fonttools/fonttools/blob/f64f0b42f2d1163b2d85194e0979def539f5dca3/Lib/fontTools/ttLib/tables/otTables.py#L960-L989 -def _classDef_bytes( - class_data: List[Tuple[List[Tuple[int, int]], int, int]], - class_ids: List[int], - coverage=False, -): - if not class_ids: - return 0 - first_ranges, min_glyph_id, max_glyph_id = class_data[class_ids[0]] - range_count = len(first_ranges) - for i in class_ids[1:]: - data = class_data[i] - range_count += len(data[0]) - min_glyph_id = min(min_glyph_id, data[1]) - max_glyph_id = max(max_glyph_id, data[2]) - glyphCount = max_glyph_id - min_glyph_id + 1 - # https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#class-definition-table-format-1 - format1_bytes = 6 + glyphCount * 2 - # https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#class-definition-table-format-2 - format2_bytes = 4 + range_count * 6 - return min(format1_bytes, format2_bytes) - - -ClusteringContext = namedtuple( - "ClusteringContext", - [ - "lines", - "all_class1", - "all_class1_data", - "all_class2_data", - "valueFormat1_bytes", - "valueFormat2_bytes", - ], -) - - -class Cluster: - # TODO(Python 3.7): Turn this into a dataclass - # ctx: ClusteringContext - # indices: int - # Caches - # TODO(Python 3.8): use functools.cached_property instead of the - # manually cached properties, and remove the cache fields listed below. 
- # _indices: Optional[List[int]] = None - # _column_indices: Optional[List[int]] = None - # _cost: Optional[int] = None - - __slots__ = "ctx", "indices_bitmask", "_indices", "_column_indices", "_cost" - - def __init__(self, ctx: ClusteringContext, indices_bitmask: int): - self.ctx = ctx - self.indices_bitmask = indices_bitmask - self._indices = None - self._column_indices = None - self._cost = None - - @property - def indices(self): - if self._indices is None: - self._indices = bit_indices(self.indices_bitmask) - return self._indices - - @property - def column_indices(self): - if self._column_indices is None: - # Indices of columns that have a 1 in at least 1 line - # => binary OR all the lines - bitmask = reduce(int.__or__, (self.ctx.lines[i] for i in self.indices)) - self._column_indices = bit_indices(bitmask) - return self._column_indices - - @property - def width(self): - # Add 1 because Class2=0 cannot be used but needs to be encoded. - return len(self.column_indices) + 1 - - @property - def cost(self): - if self._cost is None: - self._cost = ( - # 2 bytes to store the offset to this subtable in the Lookup table above - 2 - # Contents of the subtable - # From: https://docs.microsoft.com/en-us/typography/opentype/spec/gpos#pair-adjustment-positioning-format-2-class-pair-adjustment - # uint16 posFormat Format identifier: format = 2 - + 2 - # Offset16 coverageOffset Offset to Coverage table, from beginning of PairPos subtable. - + 2 - + self.coverage_bytes - # uint16 valueFormat1 ValueRecord definition — for the first glyph of the pair (may be zero). - + 2 - # uint16 valueFormat2 ValueRecord definition — for the second glyph of the pair (may be zero). - + 2 - # Offset16 classDef1Offset Offset to ClassDef table, from beginning of PairPos subtable — for the first glyph of the pair. - + 2 - + self.classDef1_bytes - # Offset16 classDef2Offset Offset to ClassDef table, from beginning of PairPos subtable — for the second glyph of the pair. - + 2 - + self.classDef2_bytes - # uint16 class1Count Number of classes in classDef1 table — includes Class 0. - + 2 - # uint16 class2Count Number of classes in classDef2 table — includes Class 0. - + 2 - # Class1Record class1Records[class1Count] Array of Class1 records, ordered by classes in classDef1. - + (self.ctx.valueFormat1_bytes + self.ctx.valueFormat2_bytes) - * len(self.indices) - * self.width - ) - return self._cost - - @property - def coverage_bytes(self): - format1_bytes = ( - # From https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#coverage-format-1 - # uint16 coverageFormat Format identifier — format = 1 - # uint16 glyphCount Number of glyphs in the glyph array - 4 - # uint16 glyphArray[glyphCount] Array of glyph IDs — in numerical order - + sum(len(self.ctx.all_class1[i]) for i in self.indices) * 2 - ) - ranges = sorted( - chain.from_iterable(self.ctx.all_class1_data[i][0] for i in self.indices) - ) - merged_range_count = 0 - last = None - for (start, end) in ranges: - if last is not None and start != last + 1: - merged_range_count += 1 - last = end - format2_bytes = ( - # From https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#coverage-format-2 - # uint16 coverageFormat Format identifier — format = 2 - # uint16 rangeCount Number of RangeRecords - 4 - # RangeRecord rangeRecords[rangeCount] Array of glyph ranges — ordered by startGlyphID. 
- # uint16 startGlyphID First glyph ID in the range - # uint16 endGlyphID Last glyph ID in the range - # uint16 startCoverageIndex Coverage Index of first glyph ID in range - + merged_range_count * 6 - ) - return min(format1_bytes, format2_bytes) - - @property - def classDef1_bytes(self): - # We can skip encoding one of the Class1 definitions, and use - # Class1=0 to represent it instead, because Class1 is gated by the - # Coverage definition. Use Class1=0 for the highest byte savings. - # Going through all options takes too long, pick the biggest class - # = what happens in otlLib.builder.ClassDefBuilder.classes() - biggest_index = max(self.indices, key=lambda i: len(self.ctx.all_class1[i])) - return _classDef_bytes( - self.ctx.all_class1_data, [i for i in self.indices if i != biggest_index] - ) - - @property - def classDef2_bytes(self): - # All Class2 need to be encoded because we can't use Class2=0 - return _classDef_bytes(self.ctx.all_class2_data, self.column_indices) - - -def cluster_pairs_by_class2_coverage_custom_cost( - font: TTFont, - pairs: Pairs, - compression: int = 5, -) -> List[Pairs]: - if not pairs: - # The subtable was actually empty? - return [pairs] - - # Sorted for reproducibility/determinism - all_class1 = sorted(set(pair[0] for pair in pairs)) - all_class2 = sorted(set(pair[1] for pair in pairs)) - - # Use Python's big ints for binary vectors representing each line - lines = [ - sum( - 1 << i if (class1, class2) in pairs else 0 - for i, class2 in enumerate(all_class2) - ) - for class1 in all_class1 - ] - - # Map glyph names to ids and work with ints throughout for ClassDef formats - name_to_id = font.getReverseGlyphMap() - # Each entry in the arrays below is (range_count, min_glyph_id, max_glyph_id) - all_class1_data = [ - _getClassRanges(name_to_id[name] for name in cls) for cls in all_class1 - ] - all_class2_data = [ - _getClassRanges(name_to_id[name] for name in cls) for cls in all_class2 - ] - - format1 = 0 - format2 = 0 - for pair, value in pairs.items(): - format1 |= value[0].getEffectiveFormat() if value[0] else 0 - format2 |= value[1].getEffectiveFormat() if value[1] else 0 - valueFormat1_bytes = bit_count(format1) * 2 - valueFormat2_bytes = bit_count(format2) * 2 - - ctx = ClusteringContext( - lines, - all_class1, - all_class1_data, - all_class2_data, - valueFormat1_bytes, - valueFormat2_bytes, - ) - - cluster_cache: Dict[int, Cluster] = {} - - def make_cluster(indices: int) -> Cluster: - cluster = cluster_cache.get(indices, None) - if cluster is not None: - return cluster - cluster = Cluster(ctx, indices) - cluster_cache[indices] = cluster - return cluster - - def merge(cluster: Cluster, other: Cluster) -> Cluster: - return make_cluster(cluster.indices_bitmask | other.indices_bitmask) - - # Agglomerative clustering by hand, checking the cost gain of the new - # cluster against the previously separate clusters - # Start with 1 cluster per line - # cluster = set of lines = new subtable - clusters = [make_cluster(1 << i) for i in range(len(lines))] - - # Cost of 1 cluster with everything - # `(1 << len) - 1` gives a bitmask full of 1's of length `len` - cost_before_splitting = make_cluster((1 << len(lines)) - 1).cost - log.debug(f" len(clusters) = {len(clusters)}") - - while len(clusters) > 1: - lowest_cost_change = None - best_cluster_index = None - best_other_index = None - best_merged = None - for i, cluster in enumerate(clusters): - for j, other in enumerate(clusters[i + 1 :]): - merged = merge(cluster, other) - cost_change = merged.cost - cluster.cost - 
other.cost - if lowest_cost_change is None or cost_change < lowest_cost_change: - lowest_cost_change = cost_change - best_cluster_index = i - best_other_index = i + 1 + j - best_merged = merged - assert lowest_cost_change is not None - assert best_cluster_index is not None - assert best_other_index is not None - assert best_merged is not None - - # If the best merge we found is still taking down the file size, then - # there's no question: we must do it, because it's beneficial in both - # ways (lower file size and lower number of subtables). However, if the - # best merge we found is not reducing file size anymore, then we need to - # look at the other stop criteria = the compression factor. - if lowest_cost_change > 0: - # Stop critera: check whether we should keep merging. - # Compute size reduction brought by splitting - cost_after_splitting = sum(c.cost for c in clusters) - # size_reduction so that after = before * (1 - size_reduction) - # E.g. before = 1000, after = 800, 1 - 800/1000 = 0.2 - size_reduction = 1 - cost_after_splitting / cost_before_splitting - - # Force more merging by taking into account the compression number. - # Target behaviour: compression number = 1 to 9, default 5 like gzip - # - 1 = accept to add 1 subtable to reduce size by 50% - # - 5 = accept to add 5 subtables to reduce size by 50% - # See https://github.com/harfbuzz/packtab/blob/master/Lib/packTab/__init__.py#L690-L691 - # Given the size reduction we have achieved so far, compute how many - # new subtables are acceptable. - max_new_subtables = -log2(1 - size_reduction) * compression - log.debug( - f" len(clusters) = {len(clusters):3d} size_reduction={size_reduction:5.2f} max_new_subtables={max_new_subtables}", - ) - if compression == 9: - # Override level 9 to mean: create any number of subtables - max_new_subtables = len(clusters) - - # If we have managed to take the number of new subtables below the - # threshold, then we can stop. - if len(clusters) <= max_new_subtables + 1: - break - - # No reason to stop yet, do the merge and move on to the next. - del clusters[best_other_index] - clusters[best_cluster_index] = best_merged - - # All clusters are final; turn bitmasks back into the "Pairs" format - pairs_by_class1: Dict[Tuple[str, ...], Pairs] = defaultdict(dict) - for pair, values in pairs.items(): - pairs_by_class1[pair[0]][pair] = values - pairs_groups: List[Pairs] = [] - for cluster in clusters: - pairs_group: Pairs = dict() - for i in cluster.indices: - class1 = all_class1[i] - pairs_group.update(pairs_by_class1[class1]) - pairs_groups.append(pairs_group) - return pairs_groups diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/pens/svgPathPen.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/pens/svgPathPen.py deleted file mode 100644 index ae6ebfbd5333a10f665d0b879d976294b2b9993e..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/pens/svgPathPen.py +++ /dev/null @@ -1,293 +0,0 @@ -from typing import Callable -from fontTools.pens.basePen import BasePen - - -def pointToString(pt, ntos=str): - return " ".join(ntos(i) for i in pt) - - -class SVGPathPen(BasePen): - """Pen to draw SVG path d commands. 
- - Example:: - >>> pen = SVGPathPen(None) - >>> pen.moveTo((0, 0)) - >>> pen.lineTo((1, 1)) - >>> pen.curveTo((2, 2), (3, 3), (4, 4)) - >>> pen.closePath() - >>> pen.getCommands() - 'M0 0 1 1C2 2 3 3 4 4Z' - - Args: - glyphSet: a dictionary of drawable glyph objects keyed by name - used to resolve component references in composite glyphs. - ntos: a callable that takes a number and returns a string, to - customize how numbers are formatted (default: str). - - Note: - Fonts have a coordinate system where Y grows up, whereas in SVG, - Y grows down. As such, rendering path data from this pen in - SVG typically results in upside-down glyphs. You can fix this - by wrapping the data from this pen in an SVG group element with - transform, or wrap this pen in a transform pen. For example: - - spen = svgPathPen.SVGPathPen(glyphset) - pen= TransformPen(spen , (1, 0, 0, -1, 0, 0)) - glyphset[glyphname].draw(pen) - print(tpen.getCommands()) - """ - - def __init__(self, glyphSet, ntos: Callable[[float], str] = str): - BasePen.__init__(self, glyphSet) - self._commands = [] - self._lastCommand = None - self._lastX = None - self._lastY = None - self._ntos = ntos - - def _handleAnchor(self): - """ - >>> pen = SVGPathPen(None) - >>> pen.moveTo((0, 0)) - >>> pen.moveTo((10, 10)) - >>> pen._commands - ['M10 10'] - """ - if self._lastCommand == "M": - self._commands.pop(-1) - - def _moveTo(self, pt): - """ - >>> pen = SVGPathPen(None) - >>> pen.moveTo((0, 0)) - >>> pen._commands - ['M0 0'] - - >>> pen = SVGPathPen(None) - >>> pen.moveTo((10, 0)) - >>> pen._commands - ['M10 0'] - - >>> pen = SVGPathPen(None) - >>> pen.moveTo((0, 10)) - >>> pen._commands - ['M0 10'] - """ - self._handleAnchor() - t = "M%s" % (pointToString(pt, self._ntos)) - self._commands.append(t) - self._lastCommand = "M" - self._lastX, self._lastY = pt - - def _lineTo(self, pt): - """ - # duplicate point - >>> pen = SVGPathPen(None) - >>> pen.moveTo((10, 10)) - >>> pen.lineTo((10, 10)) - >>> pen._commands - ['M10 10'] - - # vertical line - >>> pen = SVGPathPen(None) - >>> pen.moveTo((10, 10)) - >>> pen.lineTo((10, 0)) - >>> pen._commands - ['M10 10', 'V0'] - - # horizontal line - >>> pen = SVGPathPen(None) - >>> pen.moveTo((10, 10)) - >>> pen.lineTo((0, 10)) - >>> pen._commands - ['M10 10', 'H0'] - - # basic - >>> pen = SVGPathPen(None) - >>> pen.lineTo((70, 80)) - >>> pen._commands - ['L70 80'] - - # basic following a moveto - >>> pen = SVGPathPen(None) - >>> pen.moveTo((0, 0)) - >>> pen.lineTo((10, 10)) - >>> pen._commands - ['M0 0', ' 10 10'] - """ - x, y = pt - # duplicate point - if x == self._lastX and y == self._lastY: - return - # vertical line - elif x == self._lastX: - cmd = "V" - pts = self._ntos(y) - # horizontal line - elif y == self._lastY: - cmd = "H" - pts = self._ntos(x) - # previous was a moveto - elif self._lastCommand == "M": - cmd = None - pts = " " + pointToString(pt, self._ntos) - # basic - else: - cmd = "L" - pts = pointToString(pt, self._ntos) - # write the string - t = "" - if cmd: - t += cmd - self._lastCommand = cmd - t += pts - self._commands.append(t) - # store for future reference - self._lastX, self._lastY = pt - - def _curveToOne(self, pt1, pt2, pt3): - """ - >>> pen = SVGPathPen(None) - >>> pen.curveTo((10, 20), (30, 40), (50, 60)) - >>> pen._commands - ['C10 20 30 40 50 60'] - """ - t = "C" - t += pointToString(pt1, self._ntos) + " " - t += pointToString(pt2, self._ntos) + " " - t += pointToString(pt3, self._ntos) - self._commands.append(t) - self._lastCommand = "C" - self._lastX, self._lastY = pt3 - - def 
_qCurveToOne(self, pt1, pt2): - """ - >>> pen = SVGPathPen(None) - >>> pen.qCurveTo((10, 20), (30, 40)) - >>> pen._commands - ['Q10 20 30 40'] - >>> from fontTools.misc.roundTools import otRound - >>> pen = SVGPathPen(None, ntos=lambda v: str(otRound(v))) - >>> pen.qCurveTo((3, 3), (7, 5), (11, 4)) - >>> pen._commands - ['Q3 3 5 4', 'Q7 5 11 4'] - """ - assert pt2 is not None - t = "Q" - t += pointToString(pt1, self._ntos) + " " - t += pointToString(pt2, self._ntos) - self._commands.append(t) - self._lastCommand = "Q" - self._lastX, self._lastY = pt2 - - def _closePath(self): - """ - >>> pen = SVGPathPen(None) - >>> pen.closePath() - >>> pen._commands - ['Z'] - """ - self._commands.append("Z") - self._lastCommand = "Z" - self._lastX = self._lastY = None - - def _endPath(self): - """ - >>> pen = SVGPathPen(None) - >>> pen.endPath() - >>> pen._commands - [] - """ - self._lastCommand = None - self._lastX = self._lastY = None - - def getCommands(self): - return "".join(self._commands) - - -def main(args=None): - """Generate per-character SVG from font and text""" - - if args is None: - import sys - - args = sys.argv[1:] - - from fontTools.ttLib import TTFont - import argparse - - parser = argparse.ArgumentParser( - "fonttools pens.svgPathPen", description="Generate SVG from text" - ) - parser.add_argument("font", metavar="font.ttf", help="Font file.") - parser.add_argument("text", metavar="text", help="Text string.") - parser.add_argument( - "-y", - metavar="<number>", - help="Face index into a collection to open. Zero based.", - ) - parser.add_argument( - "--variations", - metavar="AXIS=LOC", - default="", - help="List of space separated locations. A location consist in " - "the name of a variation axis, followed by '=' and a number. E.g.: " - "wght=700 wdth=80. The default is the location of the base master.", - ) - - options = parser.parse_args(args) - - fontNumber = int(options.y) if options.y is not None else 0 - - font = TTFont(options.font, fontNumber=fontNumber) - text = options.text - - location = {} - for tag_v in options.variations.split(): - fields = tag_v.split("=") - tag = fields[0].strip() - v = int(fields[1]) - location[tag] = v - - hhea = font["hhea"] - ascent, descent = hhea.ascent, hhea.descent - - glyphset = font.getGlyphSet(location=location) - cmap = font["cmap"].getBestCmap() - - s = "" - width = 0 - for u in text: - g = cmap[ord(u)] - glyph = glyphset[g] - - pen = SVGPathPen(glyphset) - glyph.draw(pen) - commands = pen.getCommands() - - s += '<g transform="translate(%d %d) scale(1 -1)"><path d="%s"/></g>\n' % ( - width, - ascent, - commands, - ) - - width += glyph.width - - print('<?xml version="1.0" encoding="UTF-8"?>') - print( - '<svg width="%d" height="%d" xmlns="http://www.w3.org/2000/svg">' - % (width, ascent - descent) - ) - print(s, end="") - print("</svg>") - - -if __name__ == "__main__": - import sys - - if len(sys.argv) == 1: - import doctest - - sys.exit(doctest.testmod().failed) - - sys.exit(main()) diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/exrenc.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/exrenc.c deleted file mode 100644 index 36327f498cb47279ceb73110dc19a3db62bd36fc..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/exrenc.c +++ /dev/null @@ -1,560 +0,0 @@ -/* - * Copyright (c) 2021 Paul B Mahol - * - * This file is part of FFmpeg. 
- * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * OpenEXR encoder - */ - -#include <float.h> -#include <zlib.h> - -#include "libavutil/avassert.h" -#include "libavutil/opt.h" -#include "libavutil/intreadwrite.h" -#include "libavutil/imgutils.h" -#include "libavutil/pixdesc.h" -#include "libavutil/float2half.h" -#include "avcodec.h" -#include "bytestream.h" -#include "codec_internal.h" -#include "encode.h" - -enum ExrCompr { - EXR_RAW, - EXR_RLE, - EXR_ZIP1, - EXR_ZIP16, - EXR_NBCOMPR, -}; - -enum ExrPixelType { - EXR_UINT, - EXR_HALF, - EXR_FLOAT, - EXR_UNKNOWN, -}; - -static const char abgr_chlist[4] = { 'A', 'B', 'G', 'R' }; -static const char bgr_chlist[4] = { 'B', 'G', 'R', 'A' }; -static const char y_chlist[4] = { 'Y' }; -static const uint8_t gbra_order[4] = { 3, 1, 0, 2 }; -static const uint8_t gbr_order[4] = { 1, 0, 2, 0 }; -static const uint8_t y_order[4] = { 0 }; - -typedef struct EXRScanlineData { - uint8_t *compressed_data; - unsigned int compressed_size; - - uint8_t *uncompressed_data; - unsigned int uncompressed_size; - - uint8_t *tmp; - unsigned int tmp_size; - - int64_t actual_size; -} EXRScanlineData; - -typedef struct EXRContext { - const AVClass *class; - - int compression; - int pixel_type; - int planes; - int nb_scanlines; - int scanline_height; - float gamma; - const char *ch_names; - const uint8_t *ch_order; - PutByteContext pb; - - EXRScanlineData *scanline; - - Float2HalfTables f2h_tables; -} EXRContext; - -static av_cold int encode_init(AVCodecContext *avctx) -{ - EXRContext *s = avctx->priv_data; - - ff_init_float2half_tables(&s->f2h_tables); - - switch (avctx->pix_fmt) { - case AV_PIX_FMT_GBRPF32: - s->planes = 3; - s->ch_names = bgr_chlist; - s->ch_order = gbr_order; - break; - case AV_PIX_FMT_GBRAPF32: - s->planes = 4; - s->ch_names = abgr_chlist; - s->ch_order = gbra_order; - break; - case AV_PIX_FMT_GRAYF32: - s->planes = 1; - s->ch_names = y_chlist; - s->ch_order = y_order; - break; - default: - av_assert0(0); - } - - switch (s->compression) { - case EXR_RAW: - case EXR_RLE: - case EXR_ZIP1: - s->scanline_height = 1; - s->nb_scanlines = avctx->height; - break; - case EXR_ZIP16: - s->scanline_height = 16; - s->nb_scanlines = (avctx->height + s->scanline_height - 1) / s->scanline_height; - break; - default: - av_assert0(0); - } - - s->scanline = av_calloc(s->nb_scanlines, sizeof(*s->scanline)); - if (!s->scanline) - return AVERROR(ENOMEM); - - return 0; -} - -static av_cold int encode_close(AVCodecContext *avctx) -{ - EXRContext *s = avctx->priv_data; - - for (int y = 0; y < s->nb_scanlines && s->scanline; y++) { - EXRScanlineData *scanline = &s->scanline[y]; - - av_freep(&scanline->tmp); - av_freep(&scanline->compressed_data); - av_freep(&scanline->uncompressed_data); - } - - av_freep(&s->scanline); - - return 0; -} - -static void reorder_pixels(uint8_t *dst, 
const uint8_t *src, ptrdiff_t size) -{ - const ptrdiff_t half_size = (size + 1) / 2; - uint8_t *t1 = dst; - uint8_t *t2 = dst + half_size; - - for (ptrdiff_t i = 0; i < half_size; i++) { - t1[i] = *(src++); - t2[i] = *(src++); - } -} - -static void predictor(uint8_t *src, ptrdiff_t size) -{ - int p = src[0]; - - for (ptrdiff_t i = 1; i < size; i++) { - int d = src[i] - p + 384; - - p = src[i]; - src[i] = d; - } -} - -static int64_t rle_compress(uint8_t *out, int64_t out_size, - const uint8_t *in, int64_t in_size) -{ - int64_t i = 0, o = 0, run = 1, copy = 0; - - while (i < in_size) { - while (i + run < in_size && in[i] == in[i + run] && run < 128) - run++; - - if (run >= 3) { - if (o + 2 >= out_size) - return -1; - out[o++] = run - 1; - out[o++] = in[i]; - i += run; - } else { - if (i + run < in_size) - copy += run; - while (i + copy < in_size && copy < 127 && in[i + copy] != in[i + copy - 1]) - copy++; - - if (o + 1 + copy >= out_size) - return -1; - out[o++] = -copy; - - for (int x = 0; x < copy; x++) - out[o + x] = in[i + x]; - - o += copy; - i += copy; - copy = 0; - } - - run = 1; - } - - return o; -} - -static int encode_scanline_rle(EXRContext *s, const AVFrame *frame) -{ - const int64_t element_size = s->pixel_type == EXR_HALF ? 2LL : 4LL; - - for (int y = 0; y < frame->height; y++) { - EXRScanlineData *scanline = &s->scanline[y]; - int64_t tmp_size = element_size * s->planes * frame->width; - int64_t max_compressed_size = tmp_size * 3 / 2; - - av_fast_padded_malloc(&scanline->uncompressed_data, &scanline->uncompressed_size, tmp_size); - if (!scanline->uncompressed_data) - return AVERROR(ENOMEM); - - av_fast_padded_malloc(&scanline->tmp, &scanline->tmp_size, tmp_size); - if (!scanline->tmp) - return AVERROR(ENOMEM); - - av_fast_padded_malloc(&scanline->compressed_data, &scanline->compressed_size, max_compressed_size); - if (!scanline->compressed_data) - return AVERROR(ENOMEM); - - switch (s->pixel_type) { - case EXR_FLOAT: - for (int p = 0; p < s->planes; p++) { - int ch = s->ch_order[p]; - - memcpy(scanline->uncompressed_data + frame->width * 4 * p, - frame->data[ch] + y * frame->linesize[ch], frame->width * 4); - } - break; - case EXR_HALF: - for (int p = 0; p < s->planes; p++) { - int ch = s->ch_order[p]; - uint16_t *dst = (uint16_t *)(scanline->uncompressed_data + frame->width * 2 * p); - const uint32_t *src = (const uint32_t *)(frame->data[ch] + y * frame->linesize[ch]); - - for (int x = 0; x < frame->width; x++) - dst[x] = float2half(src[x], &s->f2h_tables); - } - break; - } - - reorder_pixels(scanline->tmp, scanline->uncompressed_data, tmp_size); - predictor(scanline->tmp, tmp_size); - scanline->actual_size = rle_compress(scanline->compressed_data, - max_compressed_size, - scanline->tmp, tmp_size); - - if (scanline->actual_size <= 0 || scanline->actual_size >= tmp_size) { - FFSWAP(uint8_t *, scanline->uncompressed_data, scanline->compressed_data); - FFSWAP(int, scanline->uncompressed_size, scanline->compressed_size); - scanline->actual_size = tmp_size; - } - } - - return 0; -} - -static int encode_scanline_zip(EXRContext *s, const AVFrame *frame) -{ - const int64_t element_size = s->pixel_type == EXR_HALF ? 
2LL : 4LL; - - for (int y = 0; y < s->nb_scanlines; y++) { - EXRScanlineData *scanline = &s->scanline[y]; - const int scanline_height = FFMIN(s->scanline_height, frame->height - y * s->scanline_height); - int64_t tmp_size = element_size * s->planes * frame->width * scanline_height; - int64_t max_compressed_size = tmp_size * 3 / 2; - unsigned long actual_size, source_size; - - av_fast_padded_malloc(&scanline->uncompressed_data, &scanline->uncompressed_size, tmp_size); - if (!scanline->uncompressed_data) - return AVERROR(ENOMEM); - - av_fast_padded_malloc(&scanline->tmp, &scanline->tmp_size, tmp_size); - if (!scanline->tmp) - return AVERROR(ENOMEM); - - av_fast_padded_malloc(&scanline->compressed_data, &scanline->compressed_size, max_compressed_size); - if (!scanline->compressed_data) - return AVERROR(ENOMEM); - - switch (s->pixel_type) { - case EXR_FLOAT: - for (int l = 0; l < scanline_height; l++) { - const int scanline_size = frame->width * 4 * s->planes; - - for (int p = 0; p < s->planes; p++) { - int ch = s->ch_order[p]; - - memcpy(scanline->uncompressed_data + scanline_size * l + p * frame->width * 4, - frame->data[ch] + (y * s->scanline_height + l) * frame->linesize[ch], - frame->width * 4); - } - } - break; - case EXR_HALF: - for (int l = 0; l < scanline_height; l++) { - const int scanline_size = frame->width * 2 * s->planes; - - for (int p = 0; p < s->planes; p++) { - int ch = s->ch_order[p]; - uint16_t *dst = (uint16_t *)(scanline->uncompressed_data + scanline_size * l + p * frame->width * 2); - const uint32_t *src = (const uint32_t *)(frame->data[ch] + (y * s->scanline_height + l) * frame->linesize[ch]); - - for (int x = 0; x < frame->width; x++) - dst[x] = float2half(src[x], &s->f2h_tables); - } - } - break; - } - - reorder_pixels(scanline->tmp, scanline->uncompressed_data, tmp_size); - predictor(scanline->tmp, tmp_size); - source_size = tmp_size; - actual_size = max_compressed_size; - compress(scanline->compressed_data, &actual_size, - scanline->tmp, source_size); - - scanline->actual_size = actual_size; - if (scanline->actual_size >= tmp_size) { - FFSWAP(uint8_t *, scanline->uncompressed_data, scanline->compressed_data); - FFSWAP(int, scanline->uncompressed_size, scanline->compressed_size); - scanline->actual_size = tmp_size; - } - } - - return 0; -} - -static int encode_frame(AVCodecContext *avctx, AVPacket *pkt, - const AVFrame *frame, int *got_packet) -{ - EXRContext *s = avctx->priv_data; - PutByteContext *pb = &s->pb; - int64_t offset; - int ret; - int64_t out_size = 2048LL + avctx->height * 16LL + - av_image_get_buffer_size(avctx->pix_fmt, - avctx->width, - avctx->height, 64) * 3LL / 2; - - if ((ret = ff_get_encode_buffer(avctx, pkt, out_size, 0)) < 0) - return ret; - - bytestream2_init_writer(pb, pkt->data, pkt->size); - - bytestream2_put_le32(pb, 20000630); - bytestream2_put_byte(pb, 2); - bytestream2_put_le24(pb, 0); - bytestream2_put_buffer(pb, "channels\0chlist\0", 16); - bytestream2_put_le32(pb, s->planes * 18 + 1); - - for (int p = 0; p < s->planes; p++) { - bytestream2_put_byte(pb, s->ch_names[p]); - bytestream2_put_byte(pb, 0); - bytestream2_put_le32(pb, s->pixel_type); - bytestream2_put_le32(pb, 0); - bytestream2_put_le32(pb, 1); - bytestream2_put_le32(pb, 1); - } - bytestream2_put_byte(pb, 0); - - bytestream2_put_buffer(pb, "compression\0compression\0", 24); - bytestream2_put_le32(pb, 1); - bytestream2_put_byte(pb, s->compression); - - bytestream2_put_buffer(pb, "dataWindow\0box2i\0", 17); - bytestream2_put_le32(pb, 16); - bytestream2_put_le32(pb, 0); - 
bytestream2_put_le32(pb, 0); - bytestream2_put_le32(pb, avctx->width - 1); - bytestream2_put_le32(pb, avctx->height - 1); - - bytestream2_put_buffer(pb, "displayWindow\0box2i\0", 20); - bytestream2_put_le32(pb, 16); - bytestream2_put_le32(pb, 0); - bytestream2_put_le32(pb, 0); - bytestream2_put_le32(pb, avctx->width - 1); - bytestream2_put_le32(pb, avctx->height - 1); - - bytestream2_put_buffer(pb, "lineOrder\0lineOrder\0", 20); - bytestream2_put_le32(pb, 1); - bytestream2_put_byte(pb, 0); - - bytestream2_put_buffer(pb, "screenWindowCenter\0v2f\0", 23); - bytestream2_put_le32(pb, 8); - bytestream2_put_le64(pb, 0); - - bytestream2_put_buffer(pb, "screenWindowWidth\0float\0", 24); - bytestream2_put_le32(pb, 4); - bytestream2_put_le32(pb, av_float2int(1.f)); - - if (avctx->sample_aspect_ratio.num && avctx->sample_aspect_ratio.den) { - bytestream2_put_buffer(pb, "pixelAspectRatio\0float\0", 23); - bytestream2_put_le32(pb, 4); - bytestream2_put_le32(pb, av_float2int(av_q2d(avctx->sample_aspect_ratio))); - } - - if (avctx->framerate.num && avctx->framerate.den) { - bytestream2_put_buffer(pb, "framesPerSecond\0rational\0", 25); - bytestream2_put_le32(pb, 8); - bytestream2_put_le32(pb, avctx->framerate.num); - bytestream2_put_le32(pb, avctx->framerate.den); - } - - bytestream2_put_buffer(pb, "gamma\0float\0", 12); - bytestream2_put_le32(pb, 4); - bytestream2_put_le32(pb, av_float2int(s->gamma)); - - bytestream2_put_buffer(pb, "writer\0string\0", 14); - bytestream2_put_le32(pb, 4); - bytestream2_put_buffer(pb, "lavc", 4); - bytestream2_put_byte(pb, 0); - - switch (s->compression) { - case EXR_RAW: - /* nothing to do */ - break; - case EXR_RLE: - encode_scanline_rle(s, frame); - break; - case EXR_ZIP16: - case EXR_ZIP1: - encode_scanline_zip(s, frame); - break; - default: - av_assert0(0); - } - - switch (s->compression) { - case EXR_RAW: - offset = bytestream2_tell_p(pb) + avctx->height * 8LL; - - if (s->pixel_type == EXR_FLOAT) { - - for (int y = 0; y < avctx->height; y++) { - bytestream2_put_le64(pb, offset); - offset += avctx->width * s->planes * 4 + 8; - } - - for (int y = 0; y < avctx->height; y++) { - bytestream2_put_le32(pb, y); - bytestream2_put_le32(pb, s->planes * avctx->width * 4); - for (int p = 0; p < s->planes; p++) { - int ch = s->ch_order[p]; - bytestream2_put_buffer(pb, frame->data[ch] + y * frame->linesize[ch], - avctx->width * 4); - } - } - } else { - for (int y = 0; y < avctx->height; y++) { - bytestream2_put_le64(pb, offset); - offset += avctx->width * s->planes * 2 + 8; - } - - for (int y = 0; y < avctx->height; y++) { - bytestream2_put_le32(pb, y); - bytestream2_put_le32(pb, s->planes * avctx->width * 2); - for (int p = 0; p < s->planes; p++) { - int ch = s->ch_order[p]; - const uint32_t *src = (const uint32_t *)(frame->data[ch] + y * frame->linesize[ch]); - - for (int x = 0; x < frame->width; x++) - bytestream2_put_le16(pb, float2half(src[x], &s->f2h_tables)); - } - } - } - break; - case EXR_ZIP16: - case EXR_ZIP1: - case EXR_RLE: - offset = bytestream2_tell_p(pb) + s->nb_scanlines * 8LL; - - for (int y = 0; y < s->nb_scanlines; y++) { - EXRScanlineData *scanline = &s->scanline[y]; - - bytestream2_put_le64(pb, offset); - offset += scanline->actual_size + 8; - } - - for (int y = 0; y < s->nb_scanlines; y++) { - EXRScanlineData *scanline = &s->scanline[y]; - - bytestream2_put_le32(pb, y * s->scanline_height); - bytestream2_put_le32(pb, scanline->actual_size); - bytestream2_put_buffer(pb, scanline->compressed_data, - scanline->actual_size); - } - break; - default: - 
av_assert0(0); - } - - av_shrink_packet(pkt, bytestream2_tell_p(pb)); - - *got_packet = 1; - - return 0; -} - -#define OFFSET(x) offsetof(EXRContext, x) -#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM -static const AVOption options[] = { - { "compression", "set compression type", OFFSET(compression), AV_OPT_TYPE_INT, {.i64=0}, 0, EXR_NBCOMPR-1, VE, "compr" }, - { "none", "none", 0, AV_OPT_TYPE_CONST, {.i64=EXR_RAW}, 0, 0, VE, "compr" }, - { "rle" , "RLE", 0, AV_OPT_TYPE_CONST, {.i64=EXR_RLE}, 0, 0, VE, "compr" }, - { "zip1", "ZIP1", 0, AV_OPT_TYPE_CONST, {.i64=EXR_ZIP1}, 0, 0, VE, "compr" }, - { "zip16", "ZIP16", 0, AV_OPT_TYPE_CONST, {.i64=EXR_ZIP16}, 0, 0, VE, "compr" }, - { "format", "set pixel type", OFFSET(pixel_type), AV_OPT_TYPE_INT, {.i64=EXR_FLOAT}, EXR_HALF, EXR_UNKNOWN-1, VE, "pixel" }, - { "half" , NULL, 0, AV_OPT_TYPE_CONST, {.i64=EXR_HALF}, 0, 0, VE, "pixel" }, - { "float", NULL, 0, AV_OPT_TYPE_CONST, {.i64=EXR_FLOAT}, 0, 0, VE, "pixel" }, - { "gamma", "set gamma", OFFSET(gamma), AV_OPT_TYPE_FLOAT, {.dbl=1.f}, 0.001, FLT_MAX, VE }, - { NULL}, -}; - -static const AVClass exr_class = { - .class_name = "exr", - .item_name = av_default_item_name, - .option = options, - .version = LIBAVUTIL_VERSION_INT, -}; - -const FFCodec ff_exr_encoder = { - .p.name = "exr", - CODEC_LONG_NAME("OpenEXR image"), - .priv_data_size = sizeof(EXRContext), - .p.priv_class = &exr_class, - .p.type = AVMEDIA_TYPE_VIDEO, - .p.id = AV_CODEC_ID_EXR, - .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS | - AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE, - .init = encode_init, - FF_CODEC_ENCODE_CB(encode_frame), - .close = encode_close, - .p.pix_fmts = (const enum AVPixelFormat[]) { - AV_PIX_FMT_GRAYF32, - AV_PIX_FMT_GBRPF32, - AV_PIX_FMT_GBRAPF32, - AV_PIX_FMT_NONE }, -}; diff --git a/spaces/congsaPfin/Manga-OCR/logs/Enjoy Unlimited AI Art with starryai MOD APK (No Ads No Watermark).md b/spaces/congsaPfin/Manga-OCR/logs/Enjoy Unlimited AI Art with starryai MOD APK (No Ads No Watermark).md deleted file mode 100644 index 2aac26039f127891174cf97f5c316ddf4ea8e54d..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Enjoy Unlimited AI Art with starryai MOD APK (No Ads No Watermark).md +++ /dev/null @@ -1,99 +0,0 @@ -<br /> -<h1>Starry AI Mod APK Download: How to Create Amazing AI Art on Your Phone</h1> -<p>Do you love creating art? Do you want to unleash your creativity and make stunning artworks with just a few taps? If yes, then you should try Starry AI, an app that lets you generate incredible artworks with text prompts and artificial intelligence.</p> -<h2>starry ai mod apk download</h2><br /><p><b><b>DOWNLOAD</b> >>> <a href="https://urlca.com/2uOdCK">https://urlca.com/2uOdCK</a></b></p><br /><br /> -<p>Starry AI is an app that uses state-of-the-art AI methods to transform your words into art. You simply enter a text prompt and select a preferred style, and within minutes your unique AI-generated artwork will be ready for you to enjoy.</p> -<p>Starry AI is free to use and gives you full ownership of your creations. You can use them for your next art project, print them out, or share them on social media without any restrictions.</p> -<p>But what if you want more? What if you want unlimited credits and access to premium features? Well, there's a way. You can download Starry AI Mod APK, a modified version of the original app that gives you unlimited credits and access to premium features. 
With Starry AI Mod APK, you can create as many artworks as you want without any limitations. You can also unlock more styles, aspect ratios, and initial images to customize your creations.</p> -<p>But how do you download Starry AI Mod APK? And how do you use it to create AI art? In this article, we will answer these questions and more. We will also show you some of the benefits of using Starry AI and some of the alternatives to Starry AI if you want to explore more options for creating AI art.</p> -<h2>How to Download Starry AI Mod APK?</h2> -<p>Starry AI Mod APK is a modified version of the original app that gives you unlimited credits and access to premium features. To download Starry AI Mod APK, you need to follow these steps:</p> -<p>starryai apk free download for android<br /> -starryai mod apk premium unlocked<br /> -starryai app create artworks with ai<br /> -starryai mod apk latest version<br /> -starryai apk download apkcombo<br /> -starryai mod apk no ads<br /> -starryai app enter a prompt for ai<br /> -starryai mod apk apklOO<br /> -starryai apk download for pc<br /> -starryai mod apk unlimited styles<br /> -starryai app select a preferred style<br /> -starryai mod apk 1.6.1<br /> -starryai apk download for ios<br /> -starryai mod apk hack<br /> -starryai app enjoy your ai artwork<br /> -starryai mod apk free onlyfans accounts<br /> -starryai apk download for windows 10<br /> -starryai mod apk cracked<br /> -starryai app generate incredible artworks<br /> -starryai mod apk download link<br /> -starryai apk download for mac<br /> -starryai mod apk full version<br /> -starryai app use emojis as prompts<br /> -starryai mod apk online<br /> -starryai apk download for laptop<br /> -starryai mod apk android 11<br /> -starryai app review and rating<br /> -starryai mod apk reddit<br /> -starryai apk download for chromebook<br /> -starryai mod apk android 10<br /> -starryai app features and benefits<br /> -starryai mod apk android 9<br /> -starryai apk download for tablet<br /> -starryai mod apk android 8<br /> -starryai app how to use guide<br /> -starryai mod apk android 7<br /> -starryai apk download for firestick<br /> -starryai mod apk android 6<br /> -starryai app faq and support<br /> -starryai mod apk android 5</p> -<ol> -<li>Find a reliable source for the modded file and download it on your device. You can search for Starry AI Mod APK on Google or use a trusted website like APKPure or APKMirror.</li> -<li>Enable unknown sources in your security settings and install the file. To do this, go to Settings > Security > Unknown Sources and toggle it on. Then, locate the downloaded file and tap on it to install it.</li> -<li>Launch the app and enjoy creating AI art without any limitations. You will see that you have unlimited credits and access to premium features in the app.</li> -</ol> -<p>Note: Downloading and installing modded apps is not recommended by the original developers and may pose some risks to your device and data. Use Starry AI Mod APK at your own discretion and responsibility.</p> -<h2>How to Use Starry AI to Create AI Art?</h2> -<p>Starry AI is easy to use and has a user-friendly interface. To create AI art with Starry AI, you need to follow these steps:</p> -<ol> -<li>Enter a text prompt for the AI to work with. It can be anything, even emojis or abstract concepts. For example, you can enter "a unicorn in space" or "???". The text prompt will serve as the inspiration for your art.</li> -<li>Select a preferred style from the available options. 
You can choose from different styles such as realistic, abstract, cartoon, sketch, watercolor, oil painting, etc. You can also adjust the brightness, contrast, saturation, and hue of the style.</li> -<li>Wait for a few minutes and see your AI-generated artwork ready for you to enjoy. The app will use state-of-the-art AI methods to transform your text prompt into an amazing artwork. You can see the progress of the generation on the screen.</li> -</ol> -<p>You can also edit, save, share, or print your artworks with Starry AI. You can use the tools in the app to crop, rotate, flip, or filter your artworks. You can also save them in your gallery or share them on social media platforms like Facebook, Instagram, Twitter, etc. You can also print them out using a compatible printer or order them online.</p> -<h2>What are the Benefits of Using Starry AI?</h2> -<p>Starry AI is a great tool for anyone who wants to explore their creativity and create stunning art pieces. Some of the benefits of using Starry AI are:</p> -<ul> -<li>You can generate up to 5 artworks for free every day without any watermarks. If you want more, you can use credits or watch ads to generate more artworks.</li> -<li>You can choose from a variety of styles, aspect ratios, and initial images to customize your creations. You can also adjust the parameters of the styles to suit your preferences.</li> -<li>You can use any text prompt as an inspiration for your art, even emojis or abstract concepts. The app will use natural language processing and computer vision to understand your prompt and generate relevant artworks.</li> -<li>You can use your artworks for your next art project, print them out, or share them on social media without any restrictions. You have full ownership of your creations and can use them however you want.</li> -</ul> -<h2>What are the Alternatives to Starry AI?</h2> -<p>Starry AI is not the only AI art generator app out there. There are several alternatives that are just as good or better. Some of the alternatives to Starry AI are:</p> -<ul> -<li>DALL-E 2: A paid app that creates realistic images and art from natural language descriptions. It uses a powerful neural network model called DALL-E that can generate diverse and coherent images from any text input.</li> -<li>NightCafe Creator: A free app that uses neural style transfer to transform existing photos into artworks. It allows you to choose from hundreds of styles inspired by famous artists or upload your own style. It also lets you adjust the style strength, resolution, and quality of the output.</li> -<li>DiffusionBee: A free app that uses diffusion models to create abstract and realistic images from text prompts. It uses a novel technique called diffusion generative adversarial networks (DiffusionGANs) that can produce high-quality and diverse images from any text input.</li> -<li>Simplified Image Generator: A free app that uses transformer models to create high-quality images from text prompts. It uses a simplified version of the OpenAI's Image GPT model that can generate images up to 256x256 pixels from any text input.</li> -</ul> -<h2>Conclusion</h2> -<p>Starry AI is an amazing app that lets you create stunning artworks with text prompts and AI methods. You can download Starry AI Mod APK to get unlimited credits and access to premium features. You can also try other alternatives to Starry AI if you want to explore more options for creating AI art.</p> -<p>Starry AI is a great way to unleash your creativity and make art with just a few taps. 
Whether you want to create realistic, abstract, cartoon, or sketch artworks, Starry AI can help you do it. You can also use any text prompt as an inspiration for your art, even emojis or abstract concepts.</p> -<p>Starry AI is a fun and easy way to create amazing AI art on your phone. Download Starry AI Mod APK today and start creating your own masterpieces!</p> -<h2>FAQs</h2> -<ol> -<li>What is Starry AI?</li> -<p>Starry AI is an AI art generator app that lets you create stunning artworks with text prompts and artificial intelligence.</p> -<li>How to download Starry AI Mod APK?</li> -<p>To download Starry AI Mod APK, you need to find a reliable source for the modded file and download it on your device. Then, you need to enable unknown sources in your security settings and install the file. Finally, you need to launch the app and enjoy creating AI art without any limitations.</p> -<li>How to use Starry AI to create AI art?</li> -<p>To create AI art with Starry AI, you need to enter a text prompt and select a preferred style. Then, you need to wait for a few minutes and see your AI-generated artwork ready for you to enjoy. You can also edit, save, share, or print your artworks with Starry AI.</p> -<li>What are the benefits of using Starry AI?</li> -<p>Some of the benefits of using Starry AI are that you can generate up to 5 artworks for free every day without any watermarks, choose from a variety of styles, aspect ratios, and initial images to customize your creations, use any text prompt as an inspiration for your art, even emojis or abstract concepts, and use your artworks for your next art project, print them out, or share them on social media without any restrictions.</p> -<li>What are the alternatives to Starry AI?</li> -<p>Some of the alternatives to Starry AI are DALL-E 2, NightCafe Creator, DiffusionBee, and Simplified Image Generator. These are other apps that use different AI methods to create images and art from text prompts.</p> -</ol></p> 401be4b1e0<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Facebook Video Download x2 - How to Download Videos from Facebook Watch and More.md b/spaces/congsaPfin/Manga-OCR/logs/Facebook Video Download x2 - How to Download Videos from Facebook Watch and More.md deleted file mode 100644 index ab633e13a6849da14617acbc7ca9a5ad5162c640..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Facebook Video Download x2 - How to Download Videos from Facebook Watch and More.md +++ /dev/null @@ -1,136 +0,0 @@ -<br /> -<h1>Facebook Video Download X2: How to Download Videos from Facebook in 2 Easy Steps</h1> -<p>Do you want to download videos from Facebook and watch them offline? Do you want to save your favorite videos from Facebook Watch, News Feed, or Messenger? Do you want to do it in a fast and easy way?</p> -<p>If you answered yes to any of these questions, then this article is for you. In this article, you will learn what is Facebook Video Download X2 and why you need it. You will also learn how to download videos from Facebook in 2 easy steps using two different online tools. 
By the end of this article, you will be able to download any video from Facebook with just a few clicks.</p> -<h2>facebook video download x2</h2><br /><p><b><b>Download</b> · <a href="https://urlca.com/2uO4rL">https://urlca.com/2uO4rL</a></b></p><br /><br /> -<h2>Introduction</h2> -<h3>What is Facebook Video Download X2?</h3> -<p>Facebook Video Download X2 is a term that refers to the process of downloading videos from Facebook using two simple steps. The first step is to copy the video link from Facebook, and the second step is to paste it into a Facebook video downloader. A Facebook video downloader is an online tool that allows you to download videos from Facebook and save them on your device.</p> -<h3>Why do you need to download videos from Facebook?</h3> -<p>There are many reasons why you might want to download videos from Facebook. Here are some of them:</p> -<ul> -<li>You want to watch the videos offline without internet connection.</li> -<li>You want to save the videos on your device for future reference or backup.</li> -<li>You want to share the videos with your friends or family who don't have access to Facebook.</li> -<li>You want to edit the videos or use them for your own projects.</li> -</ul> -<p>Whatever your reason is, downloading videos from Facebook can be very useful and convenient. However, downloading videos from Facebook is not as easy as it seems. You can't just right-click on the video and choose "Save as". You need a special tool that can extract the video from Facebook and convert it into a downloadable format.</p> -<h2>How to Download Videos from Facebook in 2 Easy Steps</h2> -<h3>Step 1: Copy the video link from Facebook</h3> -<p>The first step to download videos from Facebook is to copy the video link from Facebook. The video link is the URL that appears in your browser's address bar when you open the video on Facebook. Depending on where you find the video on Facebook, there are different ways to copy the video link. Here are some examples:</p> -<h4>How to copy the video link from Facebook Watch</h4> -<p>Facebook Watch is a section on Facebook where you can find original shows, live events, and popular videos. To copy the video link from Facebook Watch, follow these steps:</p> -<ol> -<li>Go to <a href="(^1^)">facebook.com/watch</a> and browse or search for the video you want to download.</li> -<li>Click on the video to open it in full screen mode.</li> -<li>Click on the three-dot icon at the top right corner of the video player.</li> -<li>Select "Copy link" from the menu that appears.</li> -</ol> -<h4>How to copy the video link from Facebook News Feed</h4> -<p>Facebook News Feed is where you can see updates from your friends, pages, groups, and other sources. 
To copy the video link from Facebook News Feed, follow these steps:</p> -<p>How to download videos from Facebook using x2 speed<br /> -Facebook video downloader online free x2<br /> -Download Facebook videos faster with x2 option<br /> -Best Facebook video downloader app with x2 feature<br /> -Save Facebook videos to your computer or mobile in x2 mode<br /> -Facebook video download x2 chrome extension<br /> -Facebook video download x2 firefox addon<br /> -Facebook video download x2 safari plugin<br /> -Facebook video download x2 edge extension<br /> -Facebook video download x2 opera extension<br /> -How to use FDOWN.net for Facebook video download x2<br /> -How to use Toolzu.com for Facebook video download x2<br /> -How to use Tom's Guide for Facebook video download x2<br /> -How to download Facebook reels in x2 speed<br /> -How to download Facebook stories in x2 speed<br /> -How to download private videos from Facebook in x2 speed<br /> -How to download live videos from Facebook in x2 speed<br /> -How to download 4K videos from Facebook in x2 speed<br /> -How to download HD videos from Facebook in x2 speed<br /> -How to download 360 videos from Facebook in x2 speed<br /> -How to download VR videos from Facebook in x2 speed<br /> -How to download 3D videos from Facebook in x2 speed<br /> -How to download IGTV videos from Facebook in x2 speed<br /> -How to download Watch Party videos from Facebook in x2 speed<br /> -How to download Premiere videos from Facebook in x2 speed<br /> -How to download Gaming videos from Facebook in x2 speed<br /> -How to download Music videos from Facebook in x2 speed<br /> -How to download News videos from Facebook in x2 speed<br /> -How to download Sports videos from Facebook in x2 speed<br /> -How to download Comedy videos from Facebook in x2 speed<br /> -How to download Education videos from Facebook in x2 speed<br /> -How to download Entertainment videos from Facebook in x2 speed<br /> -How to download Lifestyle videos from Facebook in x2 speed<br /> -How to download Travel videos from Facebook in x2 speed<br /> -How to download Food videos from Facebook in x2 speed<br /> -How to download Fashion videos from Facebook in x2 speed<br /> -How to download Beauty videos from Facebook in x2 speed<br /> -How to download Health videos from Facebook in x2 speed<br /> -How to download Fitness videos from Facebook in x2 speed<br /> -How to download Wellness videos from Facebook in x2 speed<br /> -How to download Art videos from Facebook in x2 speed<br /> -How to download Culture videos from Facebook in x2 speed<br /> -How to download Science videos from Facebook in x2 speed<br /> -How to download Technology videos from Facebook in x2 speed<br /> -How to download Business videos from Facebook in x2 speed<br /> -How to download Marketing videos from Facebook in x2 speed<br /> -How to download Finance videos from Facebook in x2 speed<br /> -How to download Politics videos from Facebook in x2 speed<br /> -How to download Social issues videos from Facebook in x2 speed</p> -<ol> -<li>Go to <a href="">facebook.com</a> and scroll or search for the video you want to download.</li> -<li>Click on the video to open it in a pop-up window.</li> -<li>Right-click on the video and select "Show video URL" from the menu that appears.</li> -<li>Copy the URL that appears in a small box above the video.</li> -</ol> -<h4>How to copy the video link from Facebook Messenger</h4> -<p>Facebook Messenger is a messaging app that allows you to chat, call, and video call with your Facebook contacts. 
To copy the video link from Facebook Messenger, follow these steps:</p> -<ol> -<li>Open the Facebook Messenger app on your device or go to <a href="">messenger.com</a> on your browser.</li> -<li>Open the conversation where the video was sent or received.</li> -<li>Tap and hold on the video until a menu appears.</li> -<li>Select "Copy Link" from the menu.</li> -</ol> -<h3>Step 2: Paste the video link into a Facebook video downloader</h3> -<p>The second step to download videos from Facebook is to paste the video link into a Facebook video downloader. A Facebook video downloader is an online tool that allows you to download videos from Facebook and save them on your device. There are many Facebook video downloaders available on the internet, but not all of them are reliable and safe. Here are two examples of Facebook video downloaders that you can use:</p> -<h4>How to use FDOWN.net as a Facebook video downloader</h4> -<p>FDOWN.net is a free and easy-to-use online tool that lets you download videos from Facebook in various formats and qualities. To use FDOWN.net as a Facebook video downloader, follow these steps:</p> -<ol> -<li>Go to <a href="">fdown.net</a> on your browser.</li> -<li>Paste the video link that you copied from Facebook into the input box.</li> -<li>Click on the "Download" button next to the input box.</li> -<li>Select the format and quality of the video that you want to download from the list that appears.</li> -<li>Click on the "Download" button next to the format and quality that you chose.</li> -<li>The video will start downloading automatically or you will be redirected to another page where you can right-click and choose "Save as" to download the video.</li> -</ol> -<h4>How to use Toolzu.com as a Facebook video downloader</h4> -<p>Toolzu.com is another free and easy-to-use online tool that lets you download videos from Facebook in various formats and qualities. To use Toolzu.com as a Facebook video downloader, follow these steps:</p> -<ol> -<li>Go to <a href="">toolzu.com/facebook-video-downloader</a> on your browser.</li> -<li>Paste the video link that you copied from Facebook into the input box.</li> -<li>Click on the "Download" button below the input box.</li> -<li>Select the format and quality of the video that you want to download from the list that appears.</li> -<li>Click on the "Download" button below the format and quality that you chose.</li> -<li>The video will start downloading automatically or you will be prompted to choose a location to save the video on your device.</li> -</ol> - <h2>Conclusion</h2> - <h3>Summary of the main points</h3> - <p>In this article, you learned what is Facebook Video Download X2 and why you need it. You also learned how to download videos from Facebook in 2 easy steps using two different online tools. By following these steps, you can download any video from Facebook with just a few clicks and enjoy them offline or share them with others.</p> - <h3>Call to action</h3> - <p>If you found this article helpful, please share it with your friends and family who might also want to download videos from Facebook. You can also leave a comment below and let us know what you think about this article or if you have any questions or suggestions. Thank you for reading!</p> - <h2>FAQs</h2> - <p>Here are some frequently asked questions about Facebook Video Download X2:</p> - <ul> - <li><b>Q: Is it legal to download videos from Facebook?</b></li> - <p>A: It depends on the source and purpose of the videos. 
If you download videos from Facebook that are public and do not violate any copyright laws or terms of service, then it is legal. However, if you download videos from Facebook that are private, copyrighted, or intended for personal use only, then it is illegal. You should always respect the rights and wishes of the video owners and creators and use the videos for fair and legal purposes only.</p> - <li><b>Q: Can I download videos from Facebook on my mobile device?</b></li> - <p>A: Yes, you can download videos from Facebook on your mobile device using the same steps as described above. However, you might need to use a different browser or app to copy the video link from Facebook and paste it into the Facebook video downloader. For example, you can use the Facebook app or the Chrome browser to copy the video link from Facebook, and then use the Safari browser or another app to paste it into the Facebook video downloader.</p> - <li><b>Q: How can I download videos from Facebook in HD quality?</b></li> - <p>A: The quality of the downloaded videos depends on the quality of the original videos on Facebook. If the original videos are in HD quality, then you can download them in HD quality using the Facebook video downloaders. However, if the original videos are in low quality, then you cannot download them in HD quality. You can check the quality of the original videos by clicking on the gear icon at the bottom right corner of the video player and selecting "Quality".</p> - <li><b>Q: How long does it take to download videos from Facebook?</b></li> - <p>A: The time it takes to download videos from Facebook depends on several factors, such as the size and length of the videos, the speed of your internet connection, and the performance of the Facebook video downloader. Generally, it takes a few seconds to a few minutes to download videos from Facebook. However, if you encounter any problems or errors during the download process, you might need to retry or use a different Facebook video downloader.</p> - <li><b>Q: What are some other ways to download videos from Facebook?</b></li> - <p>A: Besides using online tools like FDOWN.net and Toolzu.com, there are some other ways to download videos from Facebook. For example, you can use desktop software like 4K Video Downloader or Freemake Video Downloader, browser extensions like Video Downloader for Chrome or Video DownloadHelper for Firefox, or mobile apps like Video Downloader for Facebook or MyMedia File Manager. However, these methods might require more steps, permissions, or installations than using online tools.</p> -</ul></p> 401be4b1e0<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Head Ball APK - Challenge Your Friends in 1v1 Football Duels.md b/spaces/congsaPfin/Manga-OCR/logs/Head Ball APK - Challenge Your Friends in 1v1 Football Duels.md deleted file mode 100644 index e3820956632a11098de8c346f00781fd89f0b52d..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Head Ball APK - Challenge Your Friends in 1v1 Football Duels.md +++ /dev/null @@ -1,77 +0,0 @@ -<br /> -<h1>Head Ball 1 APK Download: A Fun and Thrilling Soccer Game</h1> -<p>Do you love soccer and want to play a different kind of game that is fast-paced, competitive, and exciting? If so, you might want to try head ball 1, a multiplayer soccer game that features bobble-head characters and superpowers. 
In this article, we will tell you what head ball 1 is, how to download it for your Android device, how to play it, and how to win more matches with some tips and tricks. Let's get started!</p> -<h2>head ball 1 apk download</h2><br /><p><b><b>DOWNLOAD</b> ○ <a href="https://urlca.com/2uOcxh">https://urlca.com/2uOcxh</a></b></p><br /><br /> - <h2>What is Head Ball 1?</h2> -<p>Head ball 1 is a soccer game that simplifies the gameplay by putting only two players (one from each team) on the field. The players are disembodied heads with a single foot, and they have to score goals by kicking, striking, or using their superpowers. The game has a cartoonish and humorous style, with various characters, outfits, accessories, and stadiums to unlock. The game also has a voice commentary by the legendary John Motson, who adds more fun and excitement to the matches.</p> - <h2>How to Download Head Ball 1 APK for Android Devices?</h2> -<p>If you want to play head ball 1 on your Android device, you will need to download the APK file from a reliable source. APK stands for Android Package Kit, and it is a file format that allows you to install apps that are not available on the Google Play Store. However, you need to be careful when downloading APK files, as some of them may contain malware or viruses that can harm your device. Here are the steps to download head ball 1 APK safely:</p> -<ol> -<li>Go to [APKCombo](^1^), a trusted website that offers free APK downloads for various apps and games.</li> -<li>Search for "head ball" in the search bar and click on the result that says "Head Ball APK (Android Game) - Free Download".</li> -<li>On the next page, scroll down and click on the green button that says "Download APK (21 MB)".</li> -<li>Wait for the download to finish and then open the file manager app on your device.</li> -<li>Locate the downloaded APK file and tap on it to install it. You may need to enable the option "Allow installation from unknown sources" in your device settings.</li> -<li>Once the installation is done, you can launch the game and enjoy playing head ball 1.</li> -</ol> - <h2>How to Play Head Ball 1 and What are the Features of the Game?</h2> -<p>Head ball 1 is a simple yet addictive game that can be played online against real opponents from all over the world. You can also challenge your friends by connecting your Facebook account or join a team or create your own team and gain rewards as you win matches. The game has five different leagues that you can compete in, from Bronze League to Diamond League. The more matches you win, the more fans you gain and the more stadiums you unlock.</p> -<p>The game has simple controls that allow you to move your character left or right, jump, shoot low or high, and use superpowers. Superpowers are special abilities that can give you an edge over your opponent or create unexpected challenges on the field. For example, you can freeze your opponent in a block of ice, make your character huge or tiny, reverse your controls, or make the ball burst into flames. You can unlock new superpowers as you progress through the game.</p> -<p>The game also has a unique career mode that allows you to unlock special bonuses, characters, and accessories. You can customize your character with different outfits, hats, glasses, shoes, balls, and more. You can also upgrade your character's skills with cards and coins that you earn by playing. 
The game has over 100 characters that have different stats and abilities.</p> - <h2>How to Win More Matches in Head Ball 1 with Tips and Tricks?</h2> -<p>If you want to become a champion in head ball 1, you will need some tips and tricks to improve your skills and strategy. Here are some of them:</p> -<ul> -<li>Practice your timing and accuracy. The game is all about hitting the ball at the right moment and in the right direction. You need to anticipate your opponent's moves and react quickly. You also need to aim for the corners of the goal or the top of the net to score more easily.</li> -<li>Use your superpowers wisely. Superpowers can be a game-changer, but they also have a cooldown time and a limited number of uses. You need to use them strategically and not waste them. For example, you can use them to score a goal, to prevent a goal, or to create a surprise effect. You also need to be careful of your opponent's superpowers and avoid them if possible.</li> -<li>Upgrade your character and skills. As you play more matches, you will earn coins and cards that you can use to upgrade your character's attributes, such as speed, jump, shoot, and power. You can also unlock new characters that have different strengths and weaknesses. You should choose a character that suits your playstyle and preferences.</li> -<li>Customize your character and ball. The game allows you to change your character's appearance and ball with various items that you can buy or win. Some of these items can also give you extra bonuses, such as increasing your fan base, boosting your skills, or reducing your opponent's skills. You should try different combinations and see what works best for you.</li> -<li>Have fun and enjoy the game. The most important tip is to have fun and enjoy the game. The game is meant to be entertaining and humorous, so don't take it too seriously or get frustrated if you lose. Just keep playing and learning from your mistakes, and you will improve over time.</li> -</ul> - <h2>Conclusion: A Fun and Thrilling Soccer Game</h2> -<p>Head ball 1 is a fun and thrilling soccer game that you can play on your Android device with an APK file. The game has simple controls, cartoonish graphics, voice commentary, multiplayer mode, career mode, superpowers, customization options, and more. The game is easy to play but hard to master, so you will need some tips and tricks to win more matches. 
If you love soccer and want to try something different, head ball 1 is the game for you.</p> -<p>head ball 1v1 online football game apk download<br /> -head ball multiplayer soccer apk download<br /> -head ball android game free download<br /> -head ball apk latest version download<br /> -head ball mod apk unlimited money download<br /> -head ball offline football game apk download<br /> -head ball 2 online soccer apk download<br /> -head ball 2 mod apk download<br /> -head ball 2 hack apk download<br /> -head ball 2 apk free download for android<br /> -head ball 2 apk old version download<br /> -head ball 2 apk pure download<br /> -head ball 2 apk mirror download<br /> -head ball 2 apk uptodown download<br /> -head ball 2 apk rexdl download<br /> -head ball 2 apk revdl download<br /> -head ball 2 apk mod menu download<br /> -head ball 2 apk mod unlimited diamonds download<br /> -head ball 2 apk mod unlimited gold download<br /> -head ball 2 apk mod unlimited energy download<br /> -head ball 2 online soccer game apk download<br /> -head ball 2 online football game apk download<br /> -head ball 2 online multiplayer game apk download<br /> -head ball 2 online pvp game apk download<br /> -head ball 2 online sports game apk download<br /> -head ball classic football game apk download<br /> -head ball classic soccer game apk download<br /> -head ball classic multiplayer game apk download<br /> -head ball classic online game apk download<br /> -head ball classic mod apk download<br /> -head soccer - football game - sports heads - big heads - big heads soccer - big heads football - big heads games - big heads basketball - big heads tennis - big heads volleyball - big heads hockey - big heads golf - big heads racing - big heads cards - big heads world cup - big heads champions league - big heads premier league - big heads la liga - big heads bundesliga - big heads serie a - big heads ligue 1 - big heads euro cup - big heads copa america - big heads asian cup - big heads african cup of nations - big heads olympics - big heads world cup qualifiers - big heads world cup group stage - big heads world cup knockout stage - big heads world cup final</p> -<p>Are you ready to download head ball 1 APK and start playing? If so, click on the link below and follow the instructions. Have fun!</p> -<p>[Download Head Ball 1 APK]</p> - <h2>FAQs: Five Frequently Asked Questions and Answers About Head Ball 1</h2> -<h3>Q: Is head ball 1 free to play?</h3> -<p>A: Yes, head ball 1 is free to play, but it contains ads and in-app purchases that can enhance your gaming experience.</p> - <h3>Q: How many superpowers are there in head ball 1?</h3> -<p>A: There are over 50 superpowers in head ball 1 that can affect the gameplay in various ways.</p> - <h3>Q: How can I connect with my friends in head ball 1?</h3> -<p>A: You can connect with your friends by linking your Facebook account or by creating or joining a team.</p> - <h3>Q: What are the benefits of joining a team in head ball 1?</h3> -<p>A: Joining a team in head ball 1 can give you rewards such as coins, cards, items, and more. 
You can also chat with your teammates and challenge other teams.</p> - <h3>Q: How can I contact the developers of head ball 1?</h3> -<p>A: You can contact the developers of head ball 1 by sending an email to support@masomo.com or by visiting their website at https://www.masomo.com/.</p> 401be4b1e0<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/How to Download and Play Brawlhalla on Windows 11 for Free.md b/spaces/congsaPfin/Manga-OCR/logs/How to Download and Play Brawlhalla on Windows 11 for Free.md deleted file mode 100644 index 5ffd75c0cdedd5aff2d270eccacc1254a05b8481..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/How to Download and Play Brawlhalla on Windows 11 for Free.md +++ /dev/null @@ -1,151 +0,0 @@ -<br /> -<h1>How to Download and Play Brawlhalla on Windows 11</h1> -<p>If you are looking for a fun and free platform fighting game that you can play on your Windows 11 PC, you might want to check out Brawlhalla. Brawlhalla is a game that lets you brawl with up to eight players online or locally, using over 50 legends from history and fantasy. You can also play cross-platform with millions of players on PlayStation, Xbox, Nintendo Switch, iOS, Android, and Steam. In this article, we will show you what Brawlhalla is, what are the system requirements for Windows 11, how to download and install Brawlhalla on Windows 11, and how to start playing Brawlhalla on Windows 11.</p> - <h2>What is Brawlhalla?</h2> -<p>Brawlhalla is a free-to-play platform fighting game that was developed by Blue Mammoth Games and published by Ubisoft. It was released in 2017 for PC and consoles, and in 2020 for mobile devices. The game is inspired by the Super Smash Bros. series, but has its own unique features and mechanics.</p> -<h2>brawlhalla download windows 11</h2><br /><p><b><b>DOWNLOAD</b> ✏ <a href="https://urlca.com/2uOba0">https://urlca.com/2uOba0</a></b></p><br /><br /> - <h3>A free-to-play platform fighting game</h3> -<p>Brawlhalla is a game that lets you fight on various stages using different weapons, items, and abilities. You can choose from over 50 legends, each with their own stats, signature moves, and weapon combinations. You can also unlock skins, taunts, emotes, colors, and more by playing the game or purchasing them with in-game currency or real money. Brawlhalla does not have any pay-to-win advantages or in-game purchases that affect gameplay.</p> - <h3>Supports cross-play and online multiplayer</h3> -<p>Brawlhalla is a game that supports cross-play across all platforms, meaning that you can play with or against anyone who has the game on any device. You can also play online with up to eight players in various game modes, such as free-for-all, ranked matches, custom games, tournaments, events, and more. You can also invite your friends to a private room or join a clan to chat and brawl with other players.</p> - <h3>Features over 50 legends and frequent updates</h3> -<p>Brawlhalla is a game that features over 50 legends from different cultures, myths, and media. You can play as warriors, pirates, ninjas, vikings, aliens, robots, zombies, gods, and more. 
You can also play as guest characters from other games or franchises, such as Lara Croft from Tomb Raider, Shovel Knight from Shovel Knight, Rayman from Rayman, Finn and Jake from Adventure Time, Ben 10 from Ben 10, The Walking Dead from The Walking Dead , Steven Universe from Steven Universe , Hellboy from Hellboy , John Cena from WWE , Kung Fu Panda from Kung Fu Panda , Teenage Mutant Ninja Turtles from Teenage Mutant Ninja Turtles , Diamondhead from Ben 10 , Master Chief from Halo , Amethyst from Steven Universe , Garnet from Steven Universe , Pearl from Steven Universe , Peridot from Steven Universe , Lapis Lazuli from Steven Universe , Bismuth from Steven Universe , Stevonnie from Steven Universe , Lion from Steven Universe , Greg Universe from Steven Universe , Connie Maheswaran from Steven Universe , Negan from The Walking Dead , Rick Grimes from The Walking Dead , Michonne from The Walking Dead , Daryl Dixon from The Walking Dead, and more. Brawlhalla also receives frequent updates that add new legends, weapons, stages, game modes, features, and bug fixes.</p> - <h2>What are the system requirements for Windows 11?</h2> -<p>Windows 11 is the latest version of the Windows operating system that was released in October 2021. Windows 11 offers a new design, improved performance, enhanced security, and more features than Windows 10. However, Windows 11 also has higher system requirements than Windows 10, which means that not all PCs can run it. Here are the minimum system requirements for Windows 11:</p> - <h3>Processor, RAM, storage, firmware, TPM, graphics card, display, internet connection, and Microsoft account</h3> -<table> -<tr> -<th>Component</th> -<th>Requirement</th> -</tr> -<tr> -<td>Processor</td> -<td>1 gigahertz (GHz) or faster with 2 or more cores on a compatible 64-bit processor or System on a Chip (SoC)</td> -</tr> -<tr> -<td>RAM</td> -<td>4 gigabyte (GB)</td> -</tr> -<tr> -<td>Storage</td> -<td>64 GB or larger storage device</td> -</tr> -<tr> -<td>Firmware</td> -<td>UEFI, Secure Boot capable</td> -</tr> -<tr> -<td>TPM</td> -<td>Trusted Platform Module (TPM) version 2.0</td> -</tr> -<tr> -<td>Graphics card</td> -<td>Compatible with DirectX 12 or later with WDDM 2.0 driver</td> -</tr> -<tr> -<td>Display</td> -<td>High definition (720p) display that is greater than 9” diagonally, 8 bits per color channel</td> -</tr> -<tr> -<td>Internet connection and Microsoft account</td> -<td>Windows 11 Home edition requires internet connectivity and a Microsoft account to complete device setup on first use. Switching a device out of Windows 11 Home in S mode also requires internet connectivity.</td> -</tr> -</table> - <h3>How to check if your PC meets the requirements</h3> -<p>If you are not sure if your PC meets the requirements for Windows 11, you can use the PC Health Check app to find out. The PC Health Check app is a free tool from Microsoft that can scan your PC and tell you if it is compatible with Windows 11 or not. You can download the PC Health Check app from the official website and follow the instructions to run it. If your PC is compatible, you will see a message that says "This PC can run Windows 11". If your PC is not compatible, you will see a message that says "This PC can't run Windows 11" and the reason why.</p> - <h2>How to download and install Brawlhalla on Windows 11?</h2> -<p>If your PC meets the requirements for Windows 11 and you have upgraded to it or bought a new PC with it pre-installed, you can download and install Brawlhalla on Windows 11 easily. 
There are three ways to do this: from the official website, from the Microsoft Store, or from Steam.</p> - <h3>From the official website</h3> -<p>The first way to download and install Brawlhalla on Windows 11 is from the official website. Here are the steps to do this:</p> -<p>How to download brawlhalla on windows 11 for free<br /> -Brawlhalla windows 11 compatibility and system requirements<br /> -Best brawlhalla settings and tips for windows 11 users<br /> -Brawlhalla download link and installation guide for windows 11<br /> -Brawlhalla windows 11 update and new features<br /> -Brawlhalla gameplay and review on windows 11<br /> -How to fix brawlhalla errors and issues on windows 11<br /> -Brawlhalla vs other fighting games on windows 11<br /> -How to play brawlhalla online with friends on windows 11<br /> -Brawlhalla cheats and hacks for windows 11<br /> -How to customize brawlhalla characters and skins on windows 11<br /> -Brawlhalla tournaments and events for windows 11 players<br /> -Brawlhalla cross-play and cross-progression on windows 11<br /> -How to stream brawlhalla on windows 11 using OBS or Twitch<br /> -Brawlhalla best weapons and legends for windows 11<br /> -How to improve brawlhalla performance and FPS on windows 11<br /> -Brawlhalla codes and rewards for windows 11 users<br /> -Brawlhalla mods and fan-made content for windows 11<br /> -Brawlhalla lore and story mode on windows 11<br /> -Brawlhalla fun facts and trivia for windows 11 fans<br /> -How to uninstall brawlhalla on windows 11 safely<br /> -Brawlhalla alternatives and similar games for windows 11<br /> -Brawlhalla memes and jokes for windows 11 gamers<br /> -Brawlhalla wallpapers and backgrounds for windows 11 devices<br /> -Brawlhalla merchandise and accessories for windows 11 enthusiasts<br /> -How to contact brawlhalla support and feedback on windows 11<br /> -Brawlhalla community and forums for windows 11 users<br /> -Brawlhalla patch notes and changelog for windows 11 version<br /> -Brawlhalla ranking and leaderboards for windows 11 players<br /> -Brawlhalla tips and tricks for beginners on windows 11<br /> -How to download brawlhalla beta version on windows 11<br /> -Brawlhalla controller support and configuration on windows 11<br /> -Brawlhalla soundtracks and music for windows 11 users<br /> -Brawlhalla history and development on windows 11 platform<br /> -Brawlhalla future plans and roadmap for windows 11 edition<br /> -How to transfer brawlhalla data from other devices to windows 11<br /> -Brawlhalla keyboard shortcuts and commands for windows 11 users<br /> -Brawlhalla achievements and trophies for windows 11 players<br /> -Brawlhalla DLCs and expansions for windows 11 users<br /> -Brawlhalla collaborations and crossovers with other franchises on windows 11<br /> -How to enable brawlhalla dark mode on windows 11 <br /> -Brawlhalla statistics and analytics for windows 11 users <br /> -Brawlhalla testimonials and reviews from satisfied customers on windows 11 <br /> -How to get brawlhalla premium membership and benefits on windows 11 <br /> -Brawlhalla challenges and quests for windows 11 players <br /> -How to create brawlhalla custom games and maps on windows 11 <br /> -Brawlhalla Easter eggs and secrets for windows 11 users <br /> -How to join brawlhalla clans and teams on windows 11 <br /> -Brawlhalla FAQs and answers for common questions on windows 11</p> - <ol> -<li>Go to the official website of Brawlhalla and click on the "Download" button.</li> -<li>Select "Windows" as your platform and click on 
the "Download Now" button.</li> -<li>A file named "BrawlhallaSetup.exe" will be downloaded to your PC. Run this file and follow the instructions to install Brawlhalla on your PC.</li> -<li>Once the installation is complete, you can launch Brawlhalla from your desktop or start menu.</li> -</ol> - <h3>From the Microsoft Store</h3> -<p>The second way to download and install Brawlhalla on Windows 11 is from the Microsoft Store. Here are the steps to do this:</p> - <ol> -<li>Open the Microsoft Store app on your PC and search for "Brawlhalla" in the search box.</li> -<li>Select "Brawlhalla" from the search results and click on the "Get" button.</li> -<li>The game will be downloaded and installed on your PC automatically. You can launch Brawlhalla from your start menu or by clicking on the "Play" button in the Microsoft Store app.</li> <h3>From Steam</h3> -<p>The third way to download and install Brawlhalla on Windows 11 is from Steam. Here are the steps to do this:</p> - <ol> -<li>Open the Steam app on your PC and log in to your account. If you don't have a Steam account, you can create one for free.</li> -<li>Go to the "Store" tab and search for "Brawlhalla" in the search box.</li> -<li>Select "Brawlhalla" from the search results and click on the "Play Game" button.</li> -<li>The game will be added to your library and downloaded to your PC automatically. You can launch Brawlhalla from your library or by clicking on the "Play" button in the Steam app.</li> -</ol> - <h2>How to start playing Brawlhalla on Windows 11?</h2> -<p>Now that you have downloaded and installed Brawlhalla on Windows 11, you can start playing it and enjoy the brawling action. Here are some tips to help you get started:</p> - <h3>Choose your legend and game mode</h3> -<p>When you launch Brawlhalla, you will see the main menu where you can choose your legend and game mode. You can select from over 50 legends, each with their own stats, signature moves, and weapon combinations. You can also try out different legends in the training mode or the offline mode. You can choose from various game modes, such as free-for-all, ranked matches, custom games, tournaments, events, and more. You can also play cross-platform with millions of players on PlayStation, Xbox, Nintendo Switch, iOS, Android, and Steam.</p> - <h3>Customize your controls and settings</h3> -<p>Brawlhalla is a game that lets you customize your controls and settings to suit your preferences. You can access the options menu from the main menu or by pressing the escape key during a match. You can change your keyboard or controller layout, adjust your sensitivity, toggle your sound and music, change your language, and more. You can also enable or disable features such as damage numbers, hit effects, stun time, gadget spawn rate, team damage, map voting, and more.</p> - <h3>Join or create a match and brawl</h3> -<p>Brawlhalla is a game that lets you join or create a match and brawl with up to eight players online or locally. You can join a match by selecting a game mode from the online menu or by entering a room code from a friend or a clan. You can create a match by selecting a game mode from the custom games menu or by creating a private room. You can also invite your friends to join your match or chat with other players in the lobby. Once the match starts, you can fight on various stages using different weapons, items, and abilities. 
The goal is to knock out your opponents as many times as possible before the time runs out or the stock runs out.</p> - <h2>Conclusion</h2> -<p>Brawlhalla is a free-to-play platform fighting game that you can play on your Windows 11 PC. It is a fun and exciting game that lets you brawl with over 50 legends from history and fantasy. You can also play cross-platform with millions of players on PlayStation, Xbox, Nintendo Switch, iOS, Android, and Steam. To play Brawlhalla on Windows 11, you need to meet the system requirements for Windows 11 and download and install Brawlhalla from the official website, the Microsoft Store, or Steam. Then, you can choose your legend and game mode, customize your controls and settings, join or create a match and brawl.</p> - <h2>FAQs</h2> -<p>Here are some frequently asked questions about Brawlhalla on Windows 11:</p> - <h4>Q: Is Brawlhalla free to play?</h4> -<p>A: Yes, Brawlhalla is free to play and does not have any pay-to-win advantages or in-game purchases that affect gameplay. You can unlock skins, taunts, emotes, colors, and more by playing the game or purchasing them with in-game currency or real money.</p> - <h4>Q: Is Brawlhalla cross-play?</h4> -<p>A: Yes, Brawlhalla supports cross-play across all platforms, meaning that you can play with or against anyone who has the game on any device.</p> - <h4>Q: How many players can play Brawlhalla online?</h4> -<p>A: Brawlhalla supports up to eight players online in various game modes.</p> - <h4>Q: How often does Brawlhalla update?</h4> -<p>A: Brawlhalla receives frequent updates that add new legends, weapons, stages, game modes, features, and bug fixes.</p> - <h 4>Q: What are the best legends to play in Brawlhalla?</h4> -<p>A: There is no definitive answer to this question, as different legends have different strengths and weaknesses, and the best legend for you may depend on your playstyle, preference, and skill level. However, some of the most popular and versatile legends in Brawlhalla are Orion, Koji, Val, Hattori, Brynn, and Petra.</p> 197e85843d<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Township Apk - Snrsz Para Hileli ehir Ynetimi Oyunu.md b/spaces/congsaPfin/Manga-OCR/logs/Township Apk - Snrsz Para Hileli ehir Ynetimi Oyunu.md deleted file mode 100644 index 10000348c03f2763a2308158ac2b6aa6853d85b8..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Township Apk - Snrsz Para Hileli ehir Ynetimi Oyunu.md +++ /dev/null @@ -1,151 +0,0 @@ - -<h1>Township Para Hilesi Apk: What Is It and How to Use It?</h1> - <p>Township is a popular casual game that combines city-building and farming elements. You can build your dream town, harvest crops, process them in factories, trade with other countries, run a zoo, and much more. The game is free to play, but it also offers in-app purchases of coins and cash, which are the main currencies in the game. Coins are used to buy buildings, decorations, crops, animals, and other items, while cash is used to speed up processes, buy special goods, or unlock new features.</p> -<h2>township para hilesi apk</h2><br /><p><b><b>Download File</b> ⚡ <a href="https://urlca.com/2uO6WO">https://urlca.com/2uO6WO</a></b></p><br /><br /> - <p>Para hilesi apk is a Turkish term that means money cheat apk. It is a modified version of the original game that gives you unlimited coins and cash for free. By using this apk, you can enjoy all the benefits of the game without spending any real money. 
You can expand your town faster, buy anything you want, complete orders and quests easily, and have more fun playing Township.</p> - <p>However, before you decide to download and install township para hilesi apk, you should be aware of some of the risks involved. Using a modified version of the game can expose your device to malware, viruses, or spyware. It can also cause compatibility issues, crashes, or errors in the game. Moreover, it can violate the terms of service of the game and result in your account being banned or suspended by Playrix, the developer of Township.</p> - <p>Therefore, if you want to use township para hilesi apk, you should do so at your own risk and discretion. In this article, we will show you how to download and install township para hilesi apk, how to use it, some tips and tricks for playing Township with it, some alternatives to it, and some frequently asked questions about it.</p> - <h2>How to Download and Install Township Para Hilesi Apk?</h2> - <p>If you have decided to try township para hilesi apk, here are the steps you need to follow:</p> - <h3>Step 1: Find a reliable source <h3>Step 1: Find a reliable source for the apk file</h3> - <p>One of the most important steps to download and install township para hilesi apk is to find a reliable source for the apk file. There are many websites that claim to offer the apk file, but not all of them are trustworthy. Some of them may contain malware, viruses, or spyware that can harm your device or steal your personal information. Some of them may also provide outdated or fake versions of the apk file that may not work properly or cause errors in the game.</p> -<p>township şehir ve çiftlik para hilesi apk<br /> -township mod apk sınırsız para hilesi<br /> -township para hilesi apk indir<br /> -township para hilesi apk dayı<br /> -township para hilesi apk 2023<br /> -township para hilesi apk android oyun club<br /> -township para hilesi apk son sürüm<br /> -township para hilesi apk güncel<br /> -township para hilesi apk cepde<br /> -township para hilesi apk mobilism<br /> -township para hilesi apk oyun59<br /> -township para hilesi apk oyun indir club<br /> -township para hilesi apk oyun indir vip<br /> -township para hilesi apk tamindir<br /> -township para hilesi apk full indir<br /> -township para hilesi apk revdl<br /> -township para hilesi apk rexdl<br /> -township para hilesi apk apkpure<br /> -township para hilesi apk happymod<br /> -township para hilesi apk moddroid<br /> -township para hilesi apk andropalace<br /> -township para hilesi apk androeed.ru<br /> -township para hilesi apk an1.com<br /> -township para hilesi apk android 1.com<br /> -township para hilesi apk androidoyun.club[^1^]<br /> -township şehir kurma oyunu para hilesi apk<br /> -township şehir inşa etme oyunu para hilesi apk<br /> -township şehir yönetme oyunu para hilesi apk<br /> -township şehir ve çiftlik mod menu para hilesi apk<br /> -township şehir ve çiftlik sınırsız altın ve nakit para hilesi apk<br /> -township şehir ve çiftlik online para hilesi apk<br /> -township şehir ve çiftlik offline para hilesi apk<br /> -township şehir ve çiftlik hack mod para hilesi apk<br /> -township şehir ve çiftlik mega mod para hilesi apk<br /> -township şehir ve çiftlik premium mod para hilesi apk<br /> -township şehir ve çiftlik pro mod para hilesi apk<br /> -township şehir ve çiftlik vip mod para hilesi apk<br /> -township şehir ve çiftlik plus mod para hilesi apk<br /> -township şehir ve çiftlik unlimited money mod para hilesi apk<br /> -township 
şehir ve çiftlik unlimited cash mod para hilesi apk<br /> -township şehir ve çiftlik unlimited coins mod para hilesi apk<br /> -township şehir ve çiftlik unlimited gems mod para hilesi apk<br /> -township şehir ve çiftlik unlimited resources mod para hilesi apk<br /> -township şehir ve çiftlik unlimited everything mod para hilesi apk<br /> -township şehir ve çiftlik god mode mod para hilesi apk<br /> -township şehir ve çiftlik no ads mod para hilesi apk<br /> -township şehir ve çiftlik no root mod para hilesi apk<br /> -township şehir ve çiftlik no ban mod para hilesi apk<br /> -township şehir ve çiftlik latest version mod para hilesi apk</p> - <p>Therefore, you should be careful and do some research before downloading any apk file from the internet. You should look for reviews, ratings, comments, and feedback from other users who have downloaded the apk file from the same source. You should also check the size, date, and version of the apk file to make sure it matches the original game. You should also scan the apk file with an antivirus or anti-malware program before installing it on your device.</p> - <p>One of the sources that we have found to be reliable and safe for downloading township para hilesi apk is ApkDahisi.com. This website offers a variety of modded and hacked games, including Township. The apk file that they provide is updated regularly and has been tested by many users. You can download the apk file from their website by clicking on the link below:</p> - <p><a href="(^1^)">Township Sınırsız PARA Hileli – Mod Apk - ApkDahisi.com</a></p> <h3>Step 2: Enable unknown sources on your device</h3> - <p>After you have downloaded the apk file from a reliable source, you need to enable unknown sources on your device. This is a security setting that allows you to install apps from sources other than the Google Play Store. By default, this setting is disabled on most Android devices, so you need to enable it manually before installing township para hilesi apk.</p> - <p>To enable unknown sources on your device, follow these steps:</p> - <ul> -<li>Go to the Settings app on your device and tap on Security or Privacy.</li> -<li>Find the option that says Unknown Sources or Install Unknown Apps and toggle it on.</li> -<li>A warning message may appear, asking you to confirm your action. Tap on OK or Allow to proceed.</li> -</ul> - <p>Once you have enabled unknown sources on your device, you are ready to install township para hilesi apk.</p> - <h3>Step 3: Download and install the apk file</h3> - <p>The final step to download and install township para hilesi apk is to locate the apk file on your device and install it. To do this, follow these steps:</p> - <ul> -<li>Open the File Manager app on your device and navigate to the folder where you have saved the apk file. Alternatively, you can also use a browser or a downloader app to access the apk file.</li> -<li>Tap on the apk file and select Install. A pop-up window may appear, asking you to grant permissions to the app. Tap on Accept or Install Anyway to continue.</li> -<li>Wait for the installation process to complete. It may take a few seconds or minutes, depending on the size of the apk file and the speed of your device.</li> -<li>Once the installation is done, you can tap on Open to launch the game or Done to exit the installer.</li> -</ul> - <p>Congratulations! You have successfully downloaded and installed township para hilesi apk on your device. 
You can now enjoy unlimited coins and cash in Township.</p> <h2>How to Use Township Para Hilesi Apk?</h2> - <p>Now that you have downloaded and installed township para hilesi apk, you may be wondering how to use it. It is very simple and easy to use, as it works just like the original game. Here are the steps you need to follow:</p> - <h3>Step 1: Launch the game and log in with your account</h3> - <p>The first step to use township para hilesi apk is to launch the game and log in with your account. You can use the same account that you have used in the original game, or you can create a new one if you want. You can also choose to play as a guest, but you will not be able to save your progress or access some features.</p> - <p>Once you have logged in with your account, you will see the main screen of the game, where you can see your town, farm, zoo, airport, and other buildings. You can also see your level, coins, cash, and other resources at the top of the screen.</p> - <h3>Step 2: Tap on the menu icon and select the para hilesi option</h3> - <p>The next step to use township para hilesi apk is to tap on the menu icon at the top left corner of the screen and select the para hilesi option. This will open a new window where you can see the options for adding coins and cash to your account.</p> - <p>You can also access the para hilesi option by tapping on the plus sign next to your coins or cash at the top of the screen. This will take you directly to the window where you can choose the amount of coins or cash you want to add.</p> - <h3>Step 3: Choose the amount of coins and cash you want to add to your account</h3> - <p>The final step to use township para hilesi apk is to choose the amount of coins and cash you want to add to your account. You can choose from different amounts, ranging from 1000 to 9999999. You can also enter a custom amount if you want.</p> - <p>Once you have chosen the amount of coins or cash you want to add, tap on the Generate button and wait for a few seconds. The coins or cash will be added to your account instantly. You can see the updated balance at the top of the screen.</p> - <p>You can repeat this process as many times as you want, as there is no limit to how much coins or cash you can add with township para hilesi apk. However, you should be careful not to add too much at once, as this may raise suspicion from Playrix or other players.</p> - <p>Congratulations! You have successfully used township para hilesi apk and added unlimited coins and cash to your account. You can now enjoy playing Township without any restrictions or limitations.</p> <h2>Tips and Tricks for Playing Township with Para Hilesi Apk</h2> - <p>Now that you know how to use township para hilesi apk, you may be wondering how to make the most of it. Here are some tips and tricks that can help you play Township with para hilesi apk more effectively and enjoyably:</p> - <h3>Tip 1: Use your coins and cash wisely to expand your town and farm</h3> - <p>One of the main goals of playing Township is to expand your town and farm and make them more beautiful and prosperous. You can use your coins and cash to buy new buildings, decorations, crops, animals, and other items that can enhance your town and farm. You can also use them to upgrade your existing buildings, factories, trains, planes, and other facilities that can improve your production and transportation.</p> - <p>However, you should not spend your coins and cash recklessly or randomly. 
You should plan ahead and prioritize the items that you need or want the most. You should also consider the space, time, and resources required for each item. You should balance your spending between your town and farm, as they are both important for your progress and income.</p> - <p>By using your coins and cash wisely, you can expand your town and farm faster and more efficiently, and avoid wasting them on unnecessary or low-value items.</p> - <h3>Tip 2: Complete orders and quests to earn more rewards and XP</h3> - <p>Another way to play Township with para hilesi apk is to complete orders and quests that are available in the game. Orders are requests from your customers or other towns that you can fulfill by producing and delivering certain goods. Quests are tasks or challenges that you can complete by performing certain actions or reaching certain goals in the game.</p> - <p>By completing orders and quests, you can earn more rewards and XP that can help you level up faster and unlock new features and items in the game. You can also earn more coins, cash, gems, boosters, materials, and other resources that can be useful for your town and farm.</p> - <p>However, you should not accept or complete every order or quest that comes your way. You should choose the ones that are worth your time and effort, and match your preferences and goals. You should also check the requirements, rewards, and deadlines for each order or quest before accepting or completing them. You should avoid the ones that are too difficult, expensive, or time-consuming for you.</p> - <p>By completing orders and quests strategically, you can maximize your rewards and XP, and avoid wasting your resources or missing out on better opportunities.</p> - <h3>Tip 3: Join a clan and cooperate with other players to get more benefits</h3> - <p>A third tip to play Township with para hilesi apk is to join a clan and cooperate with other players who are also playing the game. A clan is a group of players who share a common name, logo, chat room, and leaderboard. You can join an existing clan or create your own clan in the game.</p> - <p>By joining a clan, you can get more benefits from playing Township with para hilesi apk. You can chat with other clan members, share tips and tricks, ask for help or advice, send or receive gifts or resources, trade goods or materials, participate in clan events or competitions, earn clan points or rewards, and much more.</p> - <p>However, you should not join any clan that invites you or accepts you. You should look for a clan that suits your style, level, language, region, activity, goals, and expectations. You should also be respectful, friendly, helpful, active, loyal, and cooperative with your clan members. You should follow the rules and guidelines of your clan leader or co-leader. You should avoid causing trouble or conflict with your clan members or other clans.</p> - <p>By joining a clan wisely, you can enhance your social experience of playing Township with para hilesi apk, and get more support and fun from other players.</p> <h2>Alternatives to Township Para Hilesi Apk</h2> - <p>Township para hilesi apk is not the only option that you have to get unlimited coins and cash in Township. There are some alternatives that you can try if you want to play the game differently or if you encounter any problems with township para hilesi apk. 
Here are some of the alternatives that you can consider:</p> - <h3>Alternative 1: Township Mod Apk</h3> - <p>Township mod apk is another modified version of the original game that offers some extra features and benefits. Unlike township para hilesi apk, which only gives you unlimited coins and cash, township mod apk also gives you unlimited gems, boosters, materials, and other resources. It also unlocks all the buildings, decorations, crops, animals, and other items in the game. It also removes the ads and pop-ups that may interrupt your gameplay.</p> - <p>Township mod apk is similar to township para hilesi apk in terms of how to download, install, and use it. You just need to find a reliable source for the apk file, enable unknown sources on your device, and install the apk file on your device. You can then launch the game and enjoy all the features and benefits of township mod apk.</p> - <p>However, township mod apk also has some drawbacks that you should be aware of. It may not be compatible with the latest version of the game or your device. It may also cause some glitches, bugs, or errors in the game. It may also violate the terms of service of the game and result in your account being banned or suspended by Playrix.</p> - <p>Therefore, if you want to use township mod apk, you should do so at your own risk and discretion. One of the sources that we have found to be reliable and safe for downloading township mod apk is Rexdl.com. You can download the apk file from their website by clicking on the link below:</p> - <p><a href="">Township 9.0.0 Apk + Mod (Unlimited Money) for Android - Rexdl.com</a></p> - <h3>Alternative 2: Township Cheats and Hacks</h3> - <p>Township cheats and hacks are another option that you have to get unlimited coins and cash in Township. Unlike township para hilesi apk or township mod apk, which are modified versions of the game, township cheats and hacks are tools or methods that can manipulate the game data or system to give you more coins and cash. They can be online or offline, free or paid, simple or complex.</p> - <p>Township cheats and hacks vary in terms of how to access, use, and apply them. Some of them may require you to visit a website, enter your username or email, choose the amount of coins or cash you want, and click on a button. Some of them may require you to download a software or an app, connect your device to your computer, run the program or app, and follow some instructions. Some of them may require you to root or jailbreak your device, modify some files or codes, or use some commands.</p> - <p>By using township cheats and hacks, you can get unlimited coins and cash in Township without downloading or installing any apk file on your device. You can also use them on any device or platform, such as Android, iOS, Windows, Mac, etc.</p> - <p>However, township cheats and hacks also have some disadvantages that you should be aware of. They may not work properly or at all, depending on the version of the game or your device. They may also expose your device or account to malware, viruses, spyware, phishing, hacking, or other threats. They may also violate the terms of service of the game and result in your account being banned or suspended by Playrix.</p> - <p>Therefore, if you want to use township cheats and hacks, you should do so at your own risk and discretion. One of the sources that we have found to be reliable and safe for using township cheats and hacks is Cheatallgame.com. 
You can access their website by clicking on the link below:</p> - <p><a href="">Township Cheats - Unlimited Coins & Cash Hack - Cheatallgame.com</a></p> <h2>Conclusion</h2> - <p>Township is a fun and addictive game that lets you build your own town and farm. However, it can also be challenging and frustrating if you run out of coins and cash, which are the main currencies in the game. That is why some players resort to using township para hilesi apk, a modified version of the game that gives you unlimited coins and cash for free.</p> - <p>In this article, we have shown you what township para hilesi apk is, how to download and install it, how to use it, some tips and tricks for playing Township with it, and some alternatives to it. We hope that this article has been helpful and informative for you.</p> - <p>However, we also want to remind you that using township para hilesi apk is not without risks. It can expose your device or account to malware, viruses, spyware, hacking, or other threats. It can also cause compatibility issues, crashes, or errors in the game. It can also violate the terms of service of the game and result in your account being banned or suspended by Playrix.</p> - <p>Therefore, if you want to use township para hilesi apk, you should do so at your own risk and discretion. You should also be careful not to abuse or overuse it, as this may ruin the fun and balance of the game. You should also respect the rights and efforts of Playrix and other players who are playing the game fairly and legitimately.</p> - <p>If you have any questions, comments, or feedback about this article or township para hilesi apk, feel free to leave them below. We would love to hear from you and help you out. Thank you for reading and happy gaming!</p> - <h2>FAQs</h2> - <p>Here are some of the frequently asked questions about township para hilesi apk:</p> - <h3>Q1: Is Township Para Hilesi Apk safe to use?</h3> - <p>A1: Township para hilesi apk is not completely safe to use, as it can expose your device or account to malware, viruses, spyware, hacking, or other threats. It can also cause compatibility issues, crashes, or errors in the game. It can also violate the terms of service of the game and result in your account being banned or suspended by Playrix.</p> - <p>Therefore, if you want to use township para hilesi apk, you should do so at your own risk and discretion. You should also download it from a reliable source, scan it with an antivirus or anti-malware program, enable unknown sources on your device, and install it carefully.</p> - <h3>Q2: Will I get banned for using Township Para Hilesi Apk?</h3> - <p>A2: There is a possibility that you will get banned for using township para hilesi apk, as it violates the terms of service of the game. Playrix has the right to monitor, detect, and punish any players who are using modified versions of the game or cheating in any way. If they find out that you are using township para hilesi apk, they may ban or suspend your account permanently or temporarily.</p> - <p>Therefore, if you want to use township para hilesi apk, you should be prepared for the consequences. You should also avoid using it on your main account or on accounts that have a lot of progress or purchases. 
You should also avoid adding too much coins or cash at once or doing anything suspicious that may attract attention from Playrix or other players.</p> - <h3>Q3: How can I update Township Para Hilesi Apk?</h3> - <p>A3: Township para hilesi apk may not be compatible with the latest version of the game or your device. If there is a new update for the game or your device, you may need to update township para hilesi apk as well. To do this, you need to find a reliable source that offers the updated version of township para hilesi apk. You can then download and install it on your device following the same steps as before.</p> - <p>However, you should be careful not to update township para hilesi apk before checking if it works properly or not. Some updates may not work well with township para hilesi apk or may cause some problems in the game. You should also backup your data before updating township para hilesi apk in case something goes wrong.</p> - <h3>Q4: Can I use Township Para Hilesi Apk on iOS devices?</h3> - <p>A4: No, you cannot use township para hilesi apk on iOS devices such as iPhones or iPads. Township para hilesi apk is only compatible with Android devices such as smartphones or tablets. If you want to use a modified version of the game on iOS devices, you may need to look for other options such as Township mod ipa or Township hack tools or Township cheats and hacks. However, these options may also have some risks and limitations that you should be aware of.</p> - <h3>Q5: What are some other games like Township that I can play?</h3> - <p>A5: Township is a unique game that combines city-building and farming elements, but it is not the only game of its kind. There are some other games like Township that you can play if you want to try something different or if you get bored of Township. Here are some of the games that we recommend:</p> - <ul> -<li>FarmVille 2: Country Escape: This is a sequel to the famous FarmVille game that lets you build your own farm, grow crops, raise animals, craft goods, trade with friends, and explore new areas. You can also join a co-op and compete with other players in events and challenges.</li> -<li>Hay Day: This is another popular farming game that lets you create your own farm, grow crops, tend animals, make products, trade with neighbors, and customize your farm. You can also join a neighborhood and chat with other players.</li> -<li>SimCity BuildIt: This is a city-building game that lets you create your own city, manage its resources, services, and citizens, and solve problems. You can also trade with other cities, join clubs, and compete in contests and wars.</li> -<li>Megapolis: This is another city-building game that lets you design and develop your own metropolis, build infrastructure, industries, and landmarks, and manage its economy and ecology. You can also cooperate with other players in state contests and alliances.</li> -<li>Homescapes: This is a casual game that lets you renovate and decorate your old mansion, solve puzzles, complete tasks, and uncover secrets. You can also interact with various characters, pets, and neighbors.</li> -</ul> - <p>These are some of the games that are similar to Township that you can play on your device. 
You can find them on the Google Play Store or the App Store and download them for free.</p> 197e85843d<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Unleash the Plague with Plague Inc APK Hack Mod - Free Download.md b/spaces/congsaPfin/Manga-OCR/logs/Unleash the Plague with Plague Inc APK Hack Mod - Free Download.md deleted file mode 100644 index e104c10e7ba25fe22963c79ae3e0d412082f4374..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Unleash the Plague with Plague Inc APK Hack Mod - Free Download.md +++ /dev/null @@ -1,53 +0,0 @@ - -<h1>Plague Inc APK Hack Mod: How to Infect the World with Unlimited Resources</h1> -<p>Do you love playing strategy games that challenge your mind and creativity? Do you want to create and unleash a deadly virus that can wipe out humanity? If yes, then you should try Plague Inc, a popular game that simulates a global pandemic. But be warned, this game is not easy. You will need a lot of resources and skills to succeed. That's why we have created this guide to help you download and install Plague Inc APK Hack Mod, a modified version of the game that gives you unlimited money, DNA points, and premium features. With this hack mod, you can infect the world with ease and have fun along the way.</p> -<h2>plague inc apk hack mod</h2><br /><p><b><b>Download Zip</b> 🗸 <a href="https://urlca.com/2uOfQH">https://urlca.com/2uOfQH</a></b></p><br /><br /> -<h2>What is Plague Inc?</h2> -<h3>A strategy game that simulates a global pandemic</h3> -<p>Plague Inc is a strategy game developed by Ndemic Creations. It was released in 2012 for iOS and Android devices, and later for Windows, Mac, Linux, PlayStation 4, Xbox One, and Nintendo Switch. The game has been downloaded over 200 million times and has received critical acclaim for its realistic and engaging gameplay.</p> -<h3>The goal is to evolve your pathogen and wipe out humanity</h3> -<p>In Plague Inc, you play as a pathogen, a microorganism that causes disease. Your goal is to evolve your pathogen and spread it across the world, infecting as many people as possible. You will also have to adapt to different environments, climates, countries, and responses from governments and organizations. Your ultimate objective is to kill all humans before they find a cure or eradicate your pathogen.</p> -<h3>The game features realistic scenarios and challenges</h3> -<p>Plague Inc is not just a simple game of infection. It is also a game of strategy, simulation, and education. The game features realistic scenarios and challenges that reflect the current state of the world. You will have to deal with factors such as population density, air travel, health care, research, politics, media, natural disasters, climate change, mutations, genetic engineering, bioweapons, zombies, vampires, aliens, and more. The game also provides information and facts about diseases and pandemics, making it both fun and educational.</p> -<h2>Why use Plague Inc APK Hack Mod?</h2> -<h3>The original game can be hard and frustrating</h3> -<p>Plague Inc is a challenging game that requires a lot of strategy and patience. You will have to balance between infecting and killing people, while avoiding detection and cure. You will also have to deal with various obstacles and events that can hinder your progress. 
The game can be hard and frustrating, especially for beginners and casual players.</p> -<h3>The hack mod gives you unlimited money and DNA points</h3> -<p>Plague Inc APK Hack Mod is a modified version of the game that gives you unlimited money and DNA points. Money and DNA points are the main resources in the game that you need to evolve your pathogen and unlock new features. With the hack mod, you can get as much money and DNA points as you want, without having to wait or watch ads. You can use them to upgrade your pathogen, buy genes, abilities, traits, symptoms, transmissions, and more.</p> -<p>[Plague Inc MOD APK 1.19.10 (Unlocked) for Android - APKdone](^1^)</p> -<h3>The hack mod unlocks all the premium features and content</h3> -<p>Plague Inc APK Hack Mod also unlocks all the premium features and content that are normally available only through in-app purchases or achievements. These include 12 different pathogen types, such as bacteria, virus, fungus, parasite, prion, nano-virus, bio-weapon, simian flu, shadow plague, necroa virus, neurax worm, and cheat mode. You can also access 25 different scenarios, such as black death, swine flu, smallpox, global warming, zombie apocalypse, vampire uprising, alien invasion, and more. You can also customize your pathogen with over 100 genes that can enhance its abilities and effects.</p> -<h2>How to download and install Plague Inc APK Hack Mod?</h2> -<h3>Find a reliable source for the modded file</h3> -<p>The first step to download and install Plague Inc APK Hack Mod is to find a reliable source for the modded file. There are many websites that claim to offer the hack mod, but not all of them are safe and trustworthy. Some of them may contain viruses, malware, or spyware that can harm your device or steal your personal information. To avoid these risks, we recommend you to download the hack mod from our website, which is tested and verified by our team.</p> -<h3>Enable unknown sources on your device settings</h3> -<p>The next step is to enable unknown sources on your device settings. This is necessary because the hack mod is not available on the official Google Play Store or App Store. To enable unknown sources, go to your device settings > security > unknown sources > toggle on. This will allow you to install apps from sources other than the official stores.</p> -<h3>Download and install the file on your device</h3> -<p>The final step is to download and install the file on your device. To do this, go to our website and click on the download button. The file will be downloaded to your device storage. Then, locate the file using a file manager app and tap on it to start the installation process. Follow the instructions on the screen and wait for the installation to finish. Once done, you can launch the game from your app drawer or home screen.</p> <h2>How to play Plague Inc APK Hack Mod?</h2> -<h3>Choose your pathogen type and difficulty level</h3> -<p>Once you have installed the game, you can start playing it by choosing your pathogen type and difficulty level. You can choose from 12 different pathogen types, each with its own advantages and disadvantages. For example, bacteria are the most common and adaptable, but also the easiest to cure. Virus are fast and unpredictable, but also unstable and hard to control. Fungus are slow and discreet, but also difficult to spread. You can also choose from four difficulty levels, ranging from casual to mega-brutal. 
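<p>A side note on the "enable unknown sources" step described above: since Android 8.0 this is no longer a single global switch but a per-app permission, which is why the option sometimes appears as Install Unknown Apps under the browser or file manager you used for the download. The sketch below is a minimal illustration in Java of how an app that wants to install packages itself can check and request that permission; it is a developer-side example under stated assumptions, not something the manual steps in this article require.</p>
<pre>
import android.content.Context;
import android.content.Intent;
import android.net.Uri;
import android.os.Build;
import android.provider.Settings;

public class UnknownSourcesHelper {

    /** Sends the user to the per-app "Install unknown apps" screen when the permission is missing. */
    public static boolean ensureCanInstallPackages(Context context) {
        if (Build.VERSION.SDK_INT < Build.VERSION_CODES.O) {
            return true; // pre-8.0 devices only have the global Unknown Sources toggle in Settings
        }
        if (context.getPackageManager().canRequestPackageInstalls()) {
            return true; // the user has already allowed this app to install packages
        }
        // Open the settings screen for this specific app; the caller should retry afterwards.
        Intent intent = new Intent(Settings.ACTION_MANAGE_UNKNOWN_APP_SOURCES,
                Uri.parse("package:" + context.getPackageName()));
        intent.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
        context.startActivity(intent);
        return false;
    }
}
</pre>
<p>For that settings entry to be enabled, the app also has to declare android.permission.REQUEST_INSTALL_PACKAGES in its manifest; a user who is simply side-loading a file by hand only needs to flip the switch in Settings as described in the steps above.</p>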
The higher the difficulty, the more realistic and challenging the game becomes.</p> -<h3>Use your money and DNA points to upgrade your pathogen</h3> -<p>After choosing your pathogen and difficulty, you will start the game by selecting a country to infect. You will then see a world map with various information and statistics about the infection and the humanity. You will also see your money and DNA points at the bottom of the screen. Money is used to buy new features and content in the game, such as genes, scenarios, cheats, etc. DNA points are used to evolve your pathogen and make it more lethal, infectious, and resistant. You can get more money and DNA points by infecting and killing people, popping bubbles, completing objectives, etc.</p> -<h3>Spread your infection across the world and watch humanity fall</h3> -<p>Your main goal in the game is to spread your infection across the world and watch humanity fall. You can do this by upgrading your pathogen with various traits, symptoms, transmissions, and abilities. For example, you can make your pathogen airborne, waterborne, insect-borne, etc. You can also make it cause coughing, sneezing, vomiting, fever, insomnia, paranoia, coma, necrosis, etc. You can also give it abilities such as heat resistance, cold resistance, drug resistance, etc. You will have to be strategic and adaptive to overcome the different challenges and responses from the humanity. You will also have to monitor the cure progress and prevent it from reaching 100%. The game ends when either all humans are dead or all pathogens are eradicated.</p> -<h2>What are the benefits of Plague Inc APK Hack Mod?</h2> -<h3>You can enjoy the game without any limitations or ads</h3> -<p>One of the benefits of Plague Inc APK Hack Mod is that you can enjoy the game without any limitations or ads. You don't have to worry about running out of money or DNA points, or having to wait for them to regenerate. You also don't have to watch annoying ads that interrupt your gameplay or offer you useless rewards. You can play the game as much as you want, whenever you want.</p> -<h3>You can experiment with different strategies and scenarios</h3> -<p>Another benefit of Plague Inc APK Hack Mod is that you can experiment with different strategies and scenarios. You can try out different pathogen types, difficulty levels, genes, traits, symptoms, transmissions, abilities, etc. You can also play different scenarios that offer unique challenges and objectives. For example, you can play as a zombie virus that creates hordes of undead, or as a vampire plague that turns humans into bloodthirsty creatures. You can also create your own scenarios using the scenario creator tool.</p> -<h3>You can have fun and learn about diseases and pandemics</h3> -<p>A final benefit of Plague Inc APK Hack Mod is that you can have fun and learn about diseases and pandemics. The game is not only entertaining but also educational. It teaches you about the basics of epidemiology, microbiology, virology, immunology, etc. It also shows you how diseases spread and evolve in real life, how humans react and respond to them, how governments and organizations deal with them, etc. The game also provides facts and information about various diseases and pandemics in history and in fiction.</p> <h2>Conclusion</h2> -<p>Plague Inc APK Hack Mod is a great way to experience the game of Plague Inc. It gives you unlimited resources and features that allow you to infect the world with ease and fun. 
You can download and install it easily from our website, which is safe and reliable. You can also play the game offline or online, with or without cheats, and with or without scenarios. You can also create your own scenarios and share them with other players. Plague Inc APK Hack Mod is a game that will challenge your mind, creativity, and morality. Are you ready to infect the world?</p> -<h2>FAQs</h2> -<h3>Is Plague Inc APK Hack Mod safe to use?</h3> -<p>Yes, Plague Inc APK Hack Mod is safe to use, as long as you download it from our website, which is tested and verified by our team. We do not include any viruses, malware, or spyware in our files. However, we are not responsible for any damage or loss that may occur from using the hack mod. Use it at your own risk.</p> -<h3>Is Plague Inc APK Hack Mod compatible with my device?</h3> -<p>Plague Inc APK Hack Mod is compatible with most Android devices that run on Android 4.1 or higher. However, some devices may not support the hack mod or the game itself due to hardware or software limitations. If you encounter any problems or errors while playing the hack mod, please contact us and we will try to help you.</p> -<h3>How do I update Plague Inc APK Hack Mod?</h3> -<p>To update Plague Inc APK Hack Mod, you will have to download and install the latest version of the hack mod from our website. You will also have to uninstall the previous version of the hack mod before installing the new one. You may also have to enable unknown sources again on your device settings. Please note that updating the hack mod may erase your progress and data in the game.</p> -<h3>Can I play Plague Inc APK Hack Mod online with other players?</h3> -<p>Yes, you can play Plague Inc APK Hack Mod online with other players, as long as they are also using the hack mod. You can join or create multiplayer games and compete or cooperate with other players. You can also chat with them and share your scenarios and strategies. However, please be respectful and fair to other players and do not use cheats or hacks that may ruin their experience.</p> -<h3>Where can I get more information about Plague Inc APK Hack Mod?</h3> -<p>You can get more information about Plague Inc APK Hack Mod from our website, where you can also find the download link, installation guide, features list, screenshots, videos, reviews, comments, and more. You can also visit the official website of Plague Inc, where you can learn more about the game itself, its developer, its updates, its news, its community, its support, etc.</p> 401be4b1e0<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/World Soccer League The Offline Soccer Game that Lets You Score Goals on Android.md b/spaces/congsaPfin/Manga-OCR/logs/World Soccer League The Offline Soccer Game that Lets You Score Goals on Android.md deleted file mode 100644 index 7cfd66f8a1ccddb09a7e7667b20804bd34c6ba2a..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/World Soccer League The Offline Soccer Game that Lets You Score Goals on Android.md +++ /dev/null @@ -1,88 +0,0 @@ -<br /> -<h1>World Soccer League Game Download for Android</h1> -<p>If you are a fan of soccer games and you are looking for a fun and exciting way to enjoy the sport on your mobile device, you might want to check out World Soccer League. This is a free sports game developed by mobirix that lets you play soccer with a variety of teams and players. You can also play it offline without the need for an internet connection. 
In this article, we will tell you more about what World Soccer League is, why you should download it for Android, what features it offers, how to download and install it, and some tips and tricks for playing it.</p> - <h2>Introduction</h2> -<p>Soccer is one of the most popular and widely played sports in the world. Millions of people watch and follow their favorite teams and players in various leagues and tournaments. However, not everyone has the opportunity or the skills to play soccer in real life. That's why soccer games are so popular among gamers who want to experience the thrill and excitement of the sport on their screens.</p> -<h2>world soccer league game download for android</h2><br /><p><b><b>Download</b> ⚡ <a href="https://urlca.com/2uO4pn">https://urlca.com/2uO4pn</a></b></p><br /><br /> -<p>There are many soccer games available for mobile devices, but not all of them are equally good. Some have poor graphics, complicated controls, or require an internet connection to play. If you are looking for a soccer game that is simple, fun, and offline, you might want to try World Soccer League.</p> - <h3>What is World Soccer League?</h3> -<p>World Soccer League is a free sports game developed by mobirix for mobile devices. It's a soccer title featuring a sizable roster of teams and players to choose from. It can also be played offline without the need for an active internet connection.</p> -<p>World Soccer League has similar gameplay to other soccer games like Dream League Soccer or eFootball PES 2021. It is noticeably different in the looks department, though, utilizing low-poly 3D graphics. It features simple controls and supports gaming on larger screens like tablets for a better view of the field and a more comfortable experience.</p> - <h3>Why download World Soccer League for Android?</h3> -<p>There are many reasons why you might want to download World Soccer League for Android. Here are some of them:</p> -<ul> -<li>It's free. You don't have to pay anything to download and play World Soccer League. You can enjoy the game without spending any money.</li> -<li>It's offline. You don't need an internet connection to play World Soccer League. You can play it anytime and anywhere you want, even if you don't have access to Wi-Fi or mobile data.</li> -<li>It's fun. World Soccer League is a fun and exciting game that will keep you entertained for hours. You can choose from 60 national teams, 60 clubs, and over 2000 players to create your dream team. You can also compete in various modes such as Exhibition, Cup, League, or Training.</li> -</ul> - <h2>Features of World Soccer League</h2> -<p>World Soccer League has many features that make it a great soccer game for Android. Here are some of them:</p> - <h3>Offline mode</h3> -<p>One of the best features of World Soccer League is its offline mode. You don't need an internet connection to play the game. You can enjoy the game without worrying about lag, glitches, or ads. 
You can also save your progress and resume it later.</p> -<p>world soccer league apk free download for android<br /> -world soccer league offline game for android<br /> -world soccer league 2023 mod apk download for android<br /> -world soccer league game play online on android<br /> -world soccer league best teams and players for android<br /> -world soccer league game review and rating for android<br /> -world soccer league game tips and tricks for android<br /> -world soccer league game cheats and hacks for android<br /> -world soccer league game latest version download for android<br /> -world soccer league game update and patch notes for android<br /> -world soccer league game features and modes for android<br /> -world soccer league game system requirements and compatibility for android<br /> -world soccer league game graphics and sound quality for android<br /> -world soccer league game controls and settings for android<br /> -world soccer league game achievements and leaderboards for android<br /> -world soccer league game support and feedback for android<br /> -world soccer league game alternatives and similar games for android<br /> -world soccer league game install and uninstall guide for android<br /> -world soccer league game size and storage space for android<br /> -world soccer league game data usage and battery consumption for android<br /> -world soccer league game screenshots and videos for android<br /> -world soccer league game developer and publisher information for android<br /> -world soccer league game license and terms of service for android<br /> -world soccer league game privacy policy and security for android<br /> -world soccer league game community and social media for android<br /> -how to play world soccer league game on android tv<br /> -how to play world soccer league game on android emulator<br /> -how to play world soccer league game with friends on android<br /> -how to play world soccer league game with controller on android<br /> -how to play world soccer league game with keyboard and mouse on android<br /> -how to fix world soccer league game not working on android<br /> -how to fix world soccer league game crashing on android<br /> -how to fix world soccer league game lagging on android<br /> -how to fix world soccer league game freezing on android<br /> -how to fix world soccer league game loading error on android<br /> -how to fix world soccer league game black screen on android<br /> -how to fix world soccer league game sound problem on android<br /> -how to fix world soccer league game connection issue on android<br /> -how to fix world soccer league game login error on android<br /> -how to fix world soccer league game compatibility issue on android<br /> -how to backup and restore world soccer league game data on android<br /> -how to transfer world soccer league game data from one device to another on android<br /> -how to clear cache and data of world soccer league game on android<br /> -how to change language and region of world soccer league game on android<br /> -how to enable or disable notifications of world soccer league game on android<br /> -how to enable or disable ads of world soccer league game on android<br /> -how to enable or disable in-app purchases of world soccer league game on android<br /> -how to enable or disable auto-update of world soccer league game on android</p> - <h3>Simple controls</h3> -<p>Another great feature of World Soccer League is its simple controls. 
You don't need to memorize complicated buttons or gestures to play the game. On the left side of the screen, you get a virtual omni-directional joystick to move your character. Meanwhile, the right side houses buttons for various actions that you can take, including short and long and perspectives. You can also zoom in and out, pause, rewind, or fast-forward the action. You can also save your best moments and share them with your friends. You can use the camera icon at the top right corner of the screen to save a screenshot or a video clip of your replay. You can also use the share icon to send your replay to your social media accounts or other apps.</p> - <h2>Conclusion</h2> -<p>World Soccer League is a free and offline soccer game for Android devices. It has simple controls, low-poly 3D graphics, and various modes and features. It lets you play soccer with 60 national teams, 60 clubs, and over 2000 players. You can also watch and save your replays from different angles and perspectives. You can download and install World Soccer League from Google Play Store or from an APK file. You can also improve your skills and performance by following some tips and tricks.</p> -<p>If you are looking for a fun and exciting soccer game for your Android device, you should give World Soccer League a try. It's a game that will keep you entertained for hours and make you feel like a soccer star.</p> - <h2>FAQs</h2> -<p>Here are some frequently asked questions about World Soccer League:</p> - <h3>Q: How do I change the language of the game?</h3> -<p>A: You can change the language of the game by going to the settings menu and tapping on the language option. You can choose from English, Spanish, Portuguese, French, German, Italian, Russian, Turkish, Arabic, Chinese, Japanese, or Korean.</p> - <h3>Q: How do I unlock more teams and players?</h3> -<p>A: You can unlock more teams and players by playing the game and earning coins. You can use coins to buy new teams and players from the shop. You can also get coins by watching ads or completing achievements.</p> - <h3>Q: How do I update the game?</h3> -<p>A: You can update the game by going to Google Play Store and checking for updates. If there is an update available, you can tap on "Update" to download and install it. If you downloaded the game from an APK file, you need to find the latest version of the APK file and install it manually.</p> - <h3>Q: How do I contact the developer of the game?</h3> -<p>A: You can contact the developer of the game by sending an email to help@mobirix.com or by visiting their website at https://www.mobirix.com/en/. You can also follow them on Facebook at https://www.facebook.com/mobirixplayen or on YouTube at https://www.youtube.com/user/mobirix1.</p> - <h3>Q: How do I rate and review the game?</h3> -<p>A: You can rate and review the game by going to Google Play Store and tapping on "Rate" or "Write a review". 
You can also share your feedback and suggestions with other players in the comments section.</p> 401be4b1e0<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/assets/Angaar Vadee marathi movie free download mp4 Watch the action drama starring Arun Govil and Ragini.md b/spaces/contluForse/HuggingGPT/assets/Angaar Vadee marathi movie free download mp4 Watch the action drama starring Arun Govil and Ragini.md deleted file mode 100644 index 0310e731a0588a373498771ed503cc12183e926d..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Angaar Vadee marathi movie free download mp4 Watch the action drama starring Arun Govil and Ragini.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>Angaar Vadee marathi movie free download mp4</h2><br /><p><b><b>Download Zip</b> ☆☆☆☆☆ <a href="https://ssurll.com/2uzxFO">https://ssurll.com/2uzxFO</a></b></p><br /><br /> -<br /> - aaccfb2cb3<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/contluForse/HuggingGPT/assets/Download Film Proposal Daisakusen Subtitle Indones Cinta Persahabatan dan Mimpi di Balik Foto-foto Kenangan.md b/spaces/contluForse/HuggingGPT/assets/Download Film Proposal Daisakusen Subtitle Indones Cinta Persahabatan dan Mimpi di Balik Foto-foto Kenangan.md deleted file mode 100644 index 4d0de2c99cf5a49904d085369794162a740a7eaa..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Download Film Proposal Daisakusen Subtitle Indones Cinta Persahabatan dan Mimpi di Balik Foto-foto Kenangan.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>Download Film Proposal Daisakusen Subtitle Indones reise firma intim su</h2><br /><p><b><b>DOWNLOAD</b> ››››› <a href="https://ssurll.com/2uzxek">https://ssurll.com/2uzxek</a></b></p><br /><br /> - - aaccfb2cb3<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/app/src/main/java/org/tensorflow/lite/examples/classification/customview/RecognitionScoreView.java b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/app/src/main/java/org/tensorflow/lite/examples/classification/customview/RecognitionScoreView.java deleted file mode 100644 index 2c57f603f12200079c888793cfa40d9b10dabde3..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/app/src/main/java/org/tensorflow/lite/examples/classification/customview/RecognitionScoreView.java +++ /dev/null @@ -1,67 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -package org.tensorflow.lite.examples.classification.customview; - -import android.content.Context; -import android.graphics.Canvas; -import android.graphics.Paint; -import android.util.AttributeSet; -import android.util.TypedValue; -import android.view.View; -import java.util.List; -import org.tensorflow.lite.examples.classification.tflite.Classifier.Recognition; - -public class RecognitionScoreView extends View implements ResultsView { - private static final float TEXT_SIZE_DIP = 16; - private final float textSizePx; - private final Paint fgPaint; - private final Paint bgPaint; - private List<Recognition> results; - - public RecognitionScoreView(final Context context, final AttributeSet set) { - super(context, set); - - textSizePx = - TypedValue.applyDimension( - TypedValue.COMPLEX_UNIT_DIP, TEXT_SIZE_DIP, getResources().getDisplayMetrics()); - fgPaint = new Paint(); - fgPaint.setTextSize(textSizePx); - - bgPaint = new Paint(); - bgPaint.setColor(0xcc4285f4); - } - - @Override - public void setResults(final List<Recognition> results) { - this.results = results; - postInvalidate(); - } - - @Override - public void onDraw(final Canvas canvas) { - final int x = 10; - int y = (int) (fgPaint.getTextSize() * 1.5f); - - canvas.drawPaint(bgPaint); - - if (results != null) { - for (final Recognition recog : results) { - canvas.drawText(recog.getTitle() + ": " + recog.getConfidence(), x, y, fgPaint); - y += (int) (fgPaint.getTextSize() * 1.5f); - } - } - } -} diff --git a/spaces/cymic/Talking_Head_Anime_3/tha3/poser/modes/separable_half.py b/spaces/cymic/Talking_Head_Anime_3/tha3/poser/modes/separable_half.py deleted file mode 100644 index cab8e0b2f33189c6f731c99841348f149f86c9c3..0000000000000000000000000000000000000000 --- a/spaces/cymic/Talking_Head_Anime_3/tha3/poser/modes/separable_half.py +++ /dev/null @@ -1,365 +0,0 @@ -from enum import Enum -from typing import List, Dict, Optional - -import torch -from torch import Tensor -from torch.nn import Module -from torch.nn.functional import interpolate - -from tha3.nn.eyebrow_decomposer.eyebrow_decomposer_03 import EyebrowDecomposer03Factory, \ - EyebrowDecomposer03Args, EyebrowDecomposer03 -from tha3.nn.eyebrow_morphing_combiner.eyebrow_morphing_combiner_03 import \ - EyebrowMorphingCombiner03Factory, EyebrowMorphingCombiner03Args, EyebrowMorphingCombiner03 -from tha3.nn.face_morpher.face_morpher_09 import FaceMorpher09Factory, FaceMorpher09Args -from tha3.poser.general_poser_02 import GeneralPoser02 -from tha3.poser.poser import PoseParameterCategory, PoseParameters -from tha3.nn.editor.editor_07 import Editor07, Editor07Args -from tha3.nn.two_algo_body_rotator.two_algo_face_body_rotator_05 import TwoAlgoFaceBodyRotator05, \ - TwoAlgoFaceBodyRotator05Args -from tha3.util import torch_load -from tha3.compute.cached_computation_func import TensorListCachedComputationFunc -from tha3.compute.cached_computation_protocol import CachedComputationProtocol -from tha3.nn.nonlinearity_factory import ReLUFactory, LeakyReLUFactory -from tha3.nn.normalization import InstanceNorm2dFactory -from tha3.nn.util import BlockArgs - - -class Network(Enum): - eyebrow_decomposer = 1 - eyebrow_morphing_combiner = 2 - face_morpher = 3 - two_algo_face_body_rotator = 4 - editor = 5 - - @property - def outputs_key(self): - return f"{self.name}_outputs" - - -class Branch(Enum): - face_morphed_half = 1 - face_morphed_full = 2 - all_outputs = 3 - - -NUM_EYEBROW_PARAMS = 12 -NUM_FACE_PARAMS = 27 
-NUM_ROTATION_PARAMS = 6 - - -class FiveStepPoserComputationProtocol(CachedComputationProtocol): - def __init__(self, eyebrow_morphed_image_index: int): - super().__init__() - self.eyebrow_morphed_image_index = eyebrow_morphed_image_index - self.cached_batch_0 = None - self.cached_eyebrow_decomposer_output = None - - def compute_func(self) -> TensorListCachedComputationFunc: - def func(modules: Dict[str, Module], - batch: List[Tensor], - outputs: Dict[str, List[Tensor]]): - if self.cached_batch_0 is None: - new_batch_0 = True - elif batch[0].shape[0] != self.cached_batch_0.shape[0]: - new_batch_0 = True - else: - new_batch_0 = torch.max((batch[0] - self.cached_batch_0).abs()).item() > 0 - if not new_batch_0: - outputs[Network.eyebrow_decomposer.outputs_key] = self.cached_eyebrow_decomposer_output - output = self.get_output(Branch.all_outputs.name, modules, batch, outputs) - if new_batch_0: - self.cached_batch_0 = batch[0] - self.cached_eyebrow_decomposer_output = outputs[Network.eyebrow_decomposer.outputs_key] - return output - - return func - - def compute_output(self, key: str, modules: Dict[str, Module], batch: List[Tensor], - outputs: Dict[str, List[Tensor]]) -> List[Tensor]: - if key == Network.eyebrow_decomposer.outputs_key: - input_image = batch[0][:, :, 64:192, 64 + 128:192 + 128] - return modules[Network.eyebrow_decomposer.name].forward(input_image) - elif key == Network.eyebrow_morphing_combiner.outputs_key: - eyebrow_decomposer_output = self.get_output(Network.eyebrow_decomposer.outputs_key, modules, batch, outputs) - background_layer = eyebrow_decomposer_output[EyebrowDecomposer03.BACKGROUND_LAYER_INDEX] - eyebrow_layer = eyebrow_decomposer_output[EyebrowDecomposer03.EYEBROW_LAYER_INDEX] - eyebrow_pose = batch[1][:, :NUM_EYEBROW_PARAMS] - return modules[Network.eyebrow_morphing_combiner.name].forward( - background_layer, - eyebrow_layer, - eyebrow_pose) - elif key == Network.face_morpher.outputs_key: - eyebrow_morphing_combiner_output = self.get_output( - Network.eyebrow_morphing_combiner.outputs_key, modules, batch, outputs) - eyebrow_morphed_image = eyebrow_morphing_combiner_output[self.eyebrow_morphed_image_index] - input_image = batch[0][:, :, 32:32 + 192, (32 + 128):(32 + 192 + 128)].clone() - input_image[:, :, 32:32 + 128, 32:32 + 128] = eyebrow_morphed_image - face_pose = batch[1][:, NUM_EYEBROW_PARAMS:NUM_EYEBROW_PARAMS + NUM_FACE_PARAMS] - return modules[Network.face_morpher.name].forward(input_image, face_pose) - elif key == Branch.face_morphed_full.name: - face_morpher_output = self.get_output(Network.face_morpher.outputs_key, modules, batch, outputs) - face_morphed_image = face_morpher_output[0] - input_image = batch[0].clone() - input_image[:, :, 32:32 + 192, 32 + 128:32 + 192 + 128] = face_morphed_image - return [input_image] - elif key == Branch.face_morphed_half.name: - face_morphed_full = self.get_output(Branch.face_morphed_full.name, modules, batch, outputs)[0] - return [ - interpolate(face_morphed_full, size=(256, 256), mode='bilinear', align_corners=False) - ] - elif key == Network.two_algo_face_body_rotator.outputs_key: - face_morphed_half = self.get_output(Branch.face_morphed_half.name, modules, batch, outputs)[0] - rotation_pose = batch[1][:, NUM_EYEBROW_PARAMS + NUM_FACE_PARAMS:] - return modules[Network.two_algo_face_body_rotator.name].forward(face_morphed_half, rotation_pose) - elif key == Network.editor.outputs_key: - input_original_image = self.get_output(Branch.face_morphed_full.name, modules, batch, outputs)[0] - rotator_outputs = self.get_output( 
- Network.two_algo_face_body_rotator.outputs_key, modules, batch, outputs) - half_warped_image = rotator_outputs[TwoAlgoFaceBodyRotator05.WARPED_IMAGE_INDEX] - full_warped_image = interpolate( - half_warped_image, size=(512, 512), mode='bilinear', align_corners=False) - half_grid_change = rotator_outputs[TwoAlgoFaceBodyRotator05.GRID_CHANGE_INDEX] - full_grid_change = interpolate( - half_grid_change, size=(512, 512), mode='bilinear', align_corners=False) - rotation_pose = batch[1][:, NUM_EYEBROW_PARAMS + NUM_FACE_PARAMS:] - return modules[Network.editor.name].forward( - input_original_image, full_warped_image, full_grid_change, rotation_pose) - elif key == Branch.all_outputs.name: - editor_output = self.get_output(Network.editor.outputs_key, modules, batch, outputs) - rotater_output = self.get_output(Network.two_algo_face_body_rotator.outputs_key, modules, batch, outputs) - face_morpher_output = self.get_output(Network.face_morpher.outputs_key, modules, batch, outputs) - eyebrow_morphing_combiner_output = self.get_output( - Network.eyebrow_morphing_combiner.outputs_key, modules, batch, outputs) - eyebrow_decomposer_output = self.get_output( - Network.eyebrow_decomposer.outputs_key, modules, batch, outputs) - output = editor_output \ - + rotater_output \ - + face_morpher_output \ - + eyebrow_morphing_combiner_output \ - + eyebrow_decomposer_output - return output - else: - raise RuntimeError("Unsupported key: " + key) - - -def load_eyebrow_decomposer(file_name: str): - factory = EyebrowDecomposer03Factory( - EyebrowDecomposer03Args( - image_size=128, - image_channels=4, - start_channels=64, - bottleneck_image_size=16, - num_bottleneck_blocks=6, - max_channels=512, - block_args=BlockArgs( - initialization_method='he', - use_spectral_norm=False, - normalization_layer_factory=InstanceNorm2dFactory(), - nonlinearity_factory=ReLUFactory(inplace=True)))) - print("Loading the eyebrow decomposer ... ", end="") - module = factory.create().half() - module.load_state_dict(torch_load(file_name)) - print("DONE!!!") - return module - - -def load_eyebrow_morphing_combiner(file_name: str): - factory = EyebrowMorphingCombiner03Factory( - EyebrowMorphingCombiner03Args( - image_size=128, - image_channels=4, - start_channels=64, - num_pose_params=12, - bottleneck_image_size=16, - num_bottleneck_blocks=6, - max_channels=512, - block_args=BlockArgs( - initialization_method='he', - use_spectral_norm=False, - normalization_layer_factory=InstanceNorm2dFactory(), - nonlinearity_factory=ReLUFactory(inplace=True)))) - print("Loading the eyebrow morphing conbiner ... ", end="") - module = factory.create().half() - module.load_state_dict(torch_load(file_name)) - print("DONE!!!") - return module - - -def load_face_morpher(file_name: str): - factory = FaceMorpher09Factory( - FaceMorpher09Args( - image_size=192, - image_channels=4, - num_pose_params=27, - start_channels=64, - bottleneck_image_size=24, - num_bottleneck_blocks=6, - max_channels=512, - block_args=BlockArgs( - initialization_method='he', - use_spectral_norm=False, - normalization_layer_factory=InstanceNorm2dFactory(), - nonlinearity_factory=ReLUFactory(inplace=False)))) - print("Loading the face morpher ... 
", end="") - module = factory.create().half() - module.load_state_dict(torch_load(file_name)) - print("DONE!!!") - return module - - -def load_two_algo_generator(file_name) -> Module: - module = TwoAlgoFaceBodyRotator05( - TwoAlgoFaceBodyRotator05Args( - image_size=256, - image_channels=4, - start_channels=64, - num_pose_params=6, - bottleneck_image_size=32, - num_bottleneck_blocks=6, - max_channels=512, - upsample_mode='nearest', - use_separable_convolution=True, - block_args=BlockArgs( - initialization_method='he', - use_spectral_norm=False, - normalization_layer_factory=InstanceNorm2dFactory(), - nonlinearity_factory=LeakyReLUFactory(inplace=False, negative_slope=0.1)))).half() - print("Loading the face-body rotator ... ", end="") - module.load_state_dict(torch_load(file_name)) - print("DONE!!!") - return module - - -def load_editor(file_name) -> Module: - module = Editor07( - Editor07Args( - image_size=512, - image_channels=4, - num_pose_params=6, - start_channels=32, - bottleneck_image_size=64, - num_bottleneck_blocks=6, - max_channels=512, - upsampling_mode='nearest', - use_separable_convolution=True, - block_args=BlockArgs( - initialization_method='he', - use_spectral_norm=False, - normalization_layer_factory=InstanceNorm2dFactory(), - nonlinearity_factory=LeakyReLUFactory(inplace=False, negative_slope=0.1)))).half() - print("Loading the combiner ... ", end="") - module.load_state_dict(torch_load(file_name)) - print("DONE!!!") - return module - - -def get_pose_parameters(): - return PoseParameters.Builder() \ - .add_parameter_group("eyebrow_troubled", PoseParameterCategory.EYEBROW, arity=2) \ - .add_parameter_group("eyebrow_angry", PoseParameterCategory.EYEBROW, arity=2) \ - .add_parameter_group("eyebrow_lowered", PoseParameterCategory.EYEBROW, arity=2) \ - .add_parameter_group("eyebrow_raised", PoseParameterCategory.EYEBROW, arity=2) \ - .add_parameter_group("eyebrow_happy", PoseParameterCategory.EYEBROW, arity=2) \ - .add_parameter_group("eyebrow_serious", PoseParameterCategory.EYEBROW, arity=2) \ - .add_parameter_group("eye_wink", PoseParameterCategory.EYE, arity=2) \ - .add_parameter_group("eye_happy_wink", PoseParameterCategory.EYE, arity=2) \ - .add_parameter_group("eye_surprised", PoseParameterCategory.EYE, arity=2) \ - .add_parameter_group("eye_relaxed", PoseParameterCategory.EYE, arity=2) \ - .add_parameter_group("eye_unimpressed", PoseParameterCategory.EYE, arity=2) \ - .add_parameter_group("eye_raised_lower_eyelid", PoseParameterCategory.EYE, arity=2) \ - .add_parameter_group("iris_small", PoseParameterCategory.IRIS_MORPH, arity=2) \ - .add_parameter_group("mouth_aaa", PoseParameterCategory.MOUTH, arity=1, default_value=1.0) \ - .add_parameter_group("mouth_iii", PoseParameterCategory.MOUTH, arity=1) \ - .add_parameter_group("mouth_uuu", PoseParameterCategory.MOUTH, arity=1) \ - .add_parameter_group("mouth_eee", PoseParameterCategory.MOUTH, arity=1) \ - .add_parameter_group("mouth_ooo", PoseParameterCategory.MOUTH, arity=1) \ - .add_parameter_group("mouth_delta", PoseParameterCategory.MOUTH, arity=1) \ - .add_parameter_group("mouth_lowered_corner", PoseParameterCategory.MOUTH, arity=2) \ - .add_parameter_group("mouth_raised_corner", PoseParameterCategory.MOUTH, arity=2) \ - .add_parameter_group("mouth_smirk", PoseParameterCategory.MOUTH, arity=1) \ - .add_parameter_group("iris_rotation_x", PoseParameterCategory.IRIS_ROTATION, arity=1, range=(-1.0, 1.0)) \ - .add_parameter_group("iris_rotation_y", PoseParameterCategory.IRIS_ROTATION, arity=1, range=(-1.0, 1.0)) \ - 
.add_parameter_group("head_x", PoseParameterCategory.FACE_ROTATION, arity=1, range=(-1.0, 1.0)) \ - .add_parameter_group("head_y", PoseParameterCategory.FACE_ROTATION, arity=1, range=(-1.0, 1.0)) \ - .add_parameter_group("neck_z", PoseParameterCategory.FACE_ROTATION, arity=1, range=(-1.0, 1.0)) \ - .add_parameter_group("body_y", PoseParameterCategory.BODY_ROTATION, arity=1, range=(-1.0, 1.0)) \ - .add_parameter_group("body_z", PoseParameterCategory.BODY_ROTATION, arity=1, range=(-1.0, 1.0)) \ - .add_parameter_group("breathing", PoseParameterCategory.BREATHING, arity=1, range=(0.0, 1.0)) \ - .build() - - -def create_poser( - device: torch.device, - module_file_names: Optional[Dict[str, str]] = None, - eyebrow_morphed_image_index: int = EyebrowMorphingCombiner03.EYEBROW_IMAGE_NO_COMBINE_ALPHA_INDEX, - default_output_index: int = 0) -> GeneralPoser02: - if module_file_names is None: - module_file_names = {} - if Network.eyebrow_decomposer.name not in module_file_names: - dir = "data/models/separable_half" - file_name = dir + "/eyebrow_decomposer.pt" - module_file_names[Network.eyebrow_decomposer.name] = file_name - if Network.eyebrow_morphing_combiner.name not in module_file_names: - dir = "data/models/separable_half" - file_name = dir + "/eyebrow_morphing_combiner.pt" - module_file_names[Network.eyebrow_morphing_combiner.name] = file_name - if Network.face_morpher.name not in module_file_names: - dir = "data/models/separable_half" - file_name = dir + "/face_morpher.pt" - module_file_names[Network.face_morpher.name] = file_name - if Network.two_algo_face_body_rotator.name not in module_file_names: - dir = "data/models/separable_half" - file_name = dir + "/two_algo_face_body_rotator.pt" - module_file_names[Network.two_algo_face_body_rotator.name] = file_name - if Network.editor.name not in module_file_names: - dir = "data/models/separable_half" - file_name = dir + "/editor.pt" - module_file_names[Network.editor.name] = file_name - - loaders = { - Network.eyebrow_decomposer.name: - lambda: load_eyebrow_decomposer(module_file_names[Network.eyebrow_decomposer.name]), - Network.eyebrow_morphing_combiner.name: - lambda: load_eyebrow_morphing_combiner(module_file_names[Network.eyebrow_morphing_combiner.name]), - Network.face_morpher.name: - lambda: load_face_morpher(module_file_names[Network.face_morpher.name]), - Network.two_algo_face_body_rotator.name: - lambda: load_two_algo_generator(module_file_names[Network.two_algo_face_body_rotator.name]), - Network.editor.name: - lambda: load_editor(module_file_names[Network.editor.name]), - } - return GeneralPoser02( - image_size=512, - module_loaders=loaders, - pose_parameters=get_pose_parameters().get_pose_parameter_groups(), - output_list_func=FiveStepPoserComputationProtocol(eyebrow_morphed_image_index).compute_func(), - subrect=None, - device=device, - output_length=29, - dtype=torch.half, - default_output_index=default_output_index) - - -if __name__ == "__main__": - device = torch.device('cuda') - poser = create_poser(device) - - image = torch.zeros(1, 4, 512, 512, device=device, dtype=torch.half) - pose = torch.zeros(1, 45, device=device, dtype=torch.half) - - repeat = 100 - acc = 0.0 - for i in range(repeat + 2): - start = torch.cuda.Event(enable_timing=True) - end = torch.cuda.Event(enable_timing=True) - - start.record() - poser.pose(image, pose) - end.record() - torch.cuda.synchronize() - if i >= 2: - elapsed_time = start.elapsed_time(end) - print("%d:" % i, elapsed_time) - acc = acc + elapsed_time - - print("average:", acc / repeat) diff --git 
a/spaces/daddyjin/TalkingFaceGeneration/FONT/modules/conv.py b/spaces/daddyjin/TalkingFaceGeneration/FONT/modules/conv.py deleted file mode 100644 index ed83da00cb199e027ef217fd360352d91a7891ff..0000000000000000000000000000000000000000 --- a/spaces/daddyjin/TalkingFaceGeneration/FONT/modules/conv.py +++ /dev/null @@ -1,44 +0,0 @@ -import torch -from torch import nn -from torch.nn import functional as F - -class Conv2d(nn.Module): - def __init__(self, cin, cout, kernel_size, stride, padding, residual=False, *args, **kwargs): - super().__init__(*args, **kwargs) - self.conv_block = nn.Sequential( - nn.Conv2d(cin, cout, kernel_size, stride, padding), - nn.BatchNorm2d(cout) - ) - self.act = nn.ReLU() - self.residual = residual - - def forward(self, x): - out = self.conv_block(x) - if self.residual: - out += x - return self.act(out) - -class nonorm_Conv2d(nn.Module): - def __init__(self, cin, cout, kernel_size, stride, padding, residual=False, *args, **kwargs): - super().__init__(*args, **kwargs) - self.conv_block = nn.Sequential( - nn.Conv2d(cin, cout, kernel_size, stride, padding), - ) - self.act = nn.LeakyReLU(0.01, inplace=True) - - def forward(self, x): - out = self.conv_block(x) - return self.act(out) - -class Conv2dTranspose(nn.Module): - def __init__(self, cin, cout, kernel_size, stride, padding, output_padding=0, *args, **kwargs): - super().__init__(*args, **kwargs) - self.conv_block = nn.Sequential( - nn.ConvTranspose2d(cin, cout, kernel_size, stride, padding, output_padding), - nn.BatchNorm2d(cout) - ) - self.act = nn.ReLU() - - def forward(self, x): - out = self.conv_block(x) - return self.act(out) diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-49c152ed.css b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-49c152ed.css deleted file mode 100644 index 0af4e02cc7c84a94d8719b02e4d6d8d67e582557..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-49c152ed.css +++ /dev/null @@ -1 +0,0 @@ -.wrap.svelte-1cl284s{display:flex;flex-direction:column;width:100%}.head.svelte-1cl284s{display:flex;justify-content:space-between}input[type=number].svelte-1cl284s{display:block;position:relative;outline:none!important;box-shadow:var(--input-shadow);border:var(--input-border-width) solid var(--input-border-color);border-radius:var(--input-radius);background:var(--input-background-fill);padding:var(--size-2) var(--size-2);height:var(--size-6);color:var(--body-text-color);font-size:var(--input-text-size);line-height:var(--line-sm);text-align:center}input.svelte-1cl284s:disabled{-webkit-text-fill-color:var(--body-text-color);-webkit-opacity:1;opacity:1}input[type=number].svelte-1cl284s:focus{box-shadow:var(--input-shadow-focus);border-color:var(--input-border-color-focus)}input.svelte-1cl284s::placeholder{color:var(--input-placeholder-color)}input[type=range].svelte-1cl284s{width:100%;accent-color:var(--slider-color)}input[disabled].svelte-1cl284s{cursor:not-allowed} diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-5aed9d46.js b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-5aed9d46.js deleted file mode 100644 index 221ce8ad56944d8ef7479abafb5c0aaa89f62a04..0000000000000000000000000000000000000000 --- 
a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-5aed9d46.js +++ /dev/null @@ -1,2 +0,0 @@ -import{S as me,e as ge,s as be,y as we,o as A,P as Fe,h as C,p as Q,w as B,r as U,u as I,v as W,k as E,C as Le,a3 as Ve,F,G as L,H as V,a4 as Xe,N as le,E as K,I as Z,m as z,g as d,K as N,Y as R,j as S,ar as qe,Z as de,X as P,B as Me,t as ke,x as ve,V as Pe,ae as Te,Q as Ke,R as Qe}from"./index-39fce9e2.js";import{B as Ue}from"./Button-79f6e3bf.js";import{B as We}from"./BlockLabel-b1428685.js";import{I as Ye}from"./IconButton-0ac328a0.js";import{E as Ze}from"./Empty-16d6169a.js";import{u as Je,S as Oe}from"./ShareButton-c9a8cbaf.js";import{n as te}from"./ModifyUpload.svelte_svelte_type_style_lang-14b768c9.js";import{M as ye}from"./ModifyUpload-02c07c98.js";import{D as pe}from"./Download-0afd7f1a.js";import{I as je}from"./Image-e7c48875.js";async function xe(l){return l?`<div style="display: flex; flex-wrap: wrap; gap: 16px">${(await Promise.all(l.map(async([e,n])=>e===null?"":await Je(e.data,"url")))).map(e=>`<img src="${e}" style="height: 400px" />`).join("")}</div>`:""}const{window:Be}=Ve;function ne(l,t,e){const n=l.slice();return n[38]=t[e][0],n[39]=t[e][1],n[41]=e,n}function ie(l,t,e){const n=l.slice();return n[38]=t[e],n[42]=t,n[41]=e,n}function oe(l){let t,e;return t=new We({props:{show_label:l[0],Icon:je,label:l[1]||"Gallery"}}),{c(){F(t.$$.fragment)},m(n,i){L(t,n,i),e=!0},p(n,i){const a={};i[0]&1&&(a.show_label=n[0]),i[0]&2&&(a.label=n[1]||"Gallery"),t.$set(a)},i(n){e||(B(t.$$.fragment,n),e=!0)},o(n){I(t.$$.fragment,n),e=!1},d(n){V(t,n)}}}function $e(l){let t,e,n,i,a,_,o=l[11]!==null&&l[6]&&re(l),r=l[8]&&ue(l),g=Z(l[10]),c=[];for(let u=0;u<g.length;u+=1)c[u]=ce(ne(l,g,u));return{c(){o&&o.c(),t=A(),e=z("div"),n=z("div"),r&&r.c(),i=A();for(let u=0;u<c.length;u+=1)c[u].c();d(n,"class","grid-container svelte-1b19cri"),N(n,"--grid-cols",l[3]),N(n,"--grid-rows",l[4]),N(n,"--object-fit",l[7]),N(n,"height",l[5]),R(n,"pt-6",l[0]),d(e,"class","grid-wrap svelte-1b19cri"),we(()=>l[33].call(e)),R(e,"fixed-height",!l[5]||l[5]=="auto")},m(u,s){o&&o.m(u,s),C(u,t,s),C(u,e,s),S(e,n),r&&r.m(n,null),S(n,i);for(let h=0;h<c.length;h+=1)c[h]&&c[h].m(n,null);a=qe(e,l[33].bind(e)),_=!0},p(u,s){if(u[11]!==null&&u[6]?o?(o.p(u,s),s[0]&2112&&B(o,1)):(o=re(u),o.c(),B(o,1),o.m(t.parentNode,t)):o&&(U(),I(o,1,1,()=>{o=null}),W()),u[8]?r?(r.p(u,s),s[0]&256&&B(r,1)):(r=ue(u),r.c(),B(r,1),r.m(n,i)):r&&(U(),I(r,1,1,()=>{r=null}),W()),s[0]&3072){g=Z(u[10]);let h;for(h=0;h<g.length;h+=1){const k=ne(u,g,h);c[h]?c[h].p(k,s):(c[h]=ce(k),c[h].c(),c[h].m(n,null))}for(;h<c.length;h+=1)c[h].d(1);c.length=g.length}(!_||s[0]&8)&&N(n,"--grid-cols",u[3]),(!_||s[0]&16)&&N(n,"--grid-rows",u[4]),(!_||s[0]&128)&&N(n,"--object-fit",u[7]),(!_||s[0]&32)&&N(n,"height",u[5]),(!_||s[0]&1)&&R(n,"pt-6",u[0]),(!_||s[0]&32)&&R(e,"fixed-height",!u[5]||u[5]=="auto")},i(u){_||(B(o),B(r),_=!0)},o(u){I(o),I(r),_=!1},d(u){u&&(E(t),E(e)),o&&o.d(u),r&&r.d(),de(c,u),a()}}}function el(l){let t,e;return t=new Ze({props:{unpadded_box:!0,size:"large",$$slots:{default:[ll]},$$scope:{ctx:l}}}),{c(){F(t.$$.fragment)},m(n,i){L(t,n,i),e=!0},p(n,i){const a={};i[1]&4096&&(a.$$scope={dirty:i,ctx:n}),t.$set(a)},i(n){e||(B(t.$$.fragment,n),e=!0)},o(n){I(t.$$.fragment,n),e=!1},d(n){V(t,n)}}}function re(l){let t,e,n,i,a,_,o,r,g,c,u,s,h,k,X,v=l[9]&&ae(l);i=new ye({props:{absolute:!1}}),i.$on("clear",l[25]);let D=l[10][l[11]][1]&&se(l),H=Z(l[10]),j=[];for(let 
b=0;b<H.length;b+=1)j[b]=fe(ie(l,H,b));return{c(){t=z("div"),e=z("div"),v&&v.c(),n=A(),F(i.$$.fragment),a=A(),_=z("img"),c=A(),D&&D.c(),u=A(),s=z("div");for(let b=0;b<j.length;b+=1)j[b].c();d(e,"class","icon-buttons svelte-1b19cri"),d(_,"data-testid","detailed-image"),P(_.src,o=l[10][l[11]][0].data)||d(_,"src",o),d(_,"alt",r=l[10][l[11]][1]||""),d(_,"title",g=l[10][l[11]][1]||null),N(_,"height","calc(100% - "+(l[10][l[11]][1]?"80px":"60px")+")"),d(_,"class","svelte-1b19cri"),R(_,"with-caption",!!l[10][l[11]][1]),d(s,"class","thumbnails scroll-hide svelte-1b19cri"),d(s,"data-testid","container_el"),d(t,"class","preview svelte-1b19cri")},m(b,G){C(b,t,G),S(t,e),v&&v.m(e,null),S(e,n),L(i,e,null),S(t,a),S(t,_),S(t,c),D&&D.m(t,null),S(t,u),S(t,s);for(let w=0;w<j.length;w+=1)j[w]&&j[w].m(s,null);l[29](s),h=!0,k||(X=[Q(_,"click",l[26]),Q(t,"keydown",l[17])],k=!0)},p(b,G){if(b[9]?v?(v.p(b,G),G[0]&512&&B(v,1)):(v=ae(b),v.c(),B(v,1),v.m(e,n)):v&&(U(),I(v,1,1,()=>{v=null}),W()),(!h||G[0]&3072&&!P(_.src,o=b[10][b[11]][0].data))&&d(_,"src",o),(!h||G[0]&3072&&r!==(r=b[10][b[11]][1]||""))&&d(_,"alt",r),(!h||G[0]&3072&&g!==(g=b[10][b[11]][1]||null))&&d(_,"title",g),(!h||G[0]&3072)&&N(_,"height","calc(100% - "+(b[10][b[11]][1]?"80px":"60px")+")"),(!h||G[0]&3072)&&R(_,"with-caption",!!b[10][b[11]][1]),b[10][b[11]][1]?D?D.p(b,G):(D=se(b),D.c(),D.m(t,u)):D&&(D.d(1),D=null),G[0]&7168){H=Z(b[10]);let w;for(w=0;w<H.length;w+=1){const q=ie(b,H,w);j[w]?j[w].p(q,G):(j[w]=fe(q),j[w].c(),j[w].m(s,null))}for(;w<j.length;w+=1)j[w].d(1);j.length=H.length}},i(b){h||(B(v),B(i.$$.fragment,b),h=!0)},o(b){I(v),I(i.$$.fragment,b),h=!1},d(b){b&&E(t),v&&v.d(),V(i),D&&D.d(),de(j,b),l[29](null),k=!1,Me(X)}}}function ae(l){let t,e,n,i;return e=new Ye({props:{Icon:pe,label:"Download"}}),{c(){t=z("a"),F(e.$$.fragment),d(t,"href",n=he(l[2][l[11]])),d(t,"target",window.__is_colab__?"_blank":null),d(t,"download","image"),d(t,"class","svelte-1b19cri")},m(a,_){C(a,t,_),L(e,t,null),i=!0},p(a,_){(!i||_[0]&2052&&n!==(n=he(a[2][a[11]])))&&d(t,"href",n)},i(a){i||(B(e.$$.fragment,a),i=!0)},o(a){I(e.$$.fragment,a),i=!1},d(a){a&&E(t),V(e)}}}function se(l){let t,e=l[10][l[11]][1]+"",n;return{c(){t=z("div"),n=ke(e),d(t,"class","caption svelte-1b19cri")},m(i,a){C(i,t,a),S(t,n)},p(i,a){a[0]&3072&&e!==(e=i[10][i[11]][1]+"")&&ve(n,e)},d(i){i&&E(t)}}}function fe(l){let t,e,n,i,a,_,o=l[41],r,g;const c=()=>l[27](t,o),u=()=>l[27](null,o);function s(){return l[28](l[41])}return{c(){t=z("button"),e=z("img"),_=A(),P(e.src,n=l[38][0].data)||d(e,"src",n),d(e,"title",i=l[38][1]||null),d(e,"alt",a=l[38][1]||null),d(e,"class","svelte-1b19cri"),d(t,"class","thumbnail-item thumbnail-small svelte-1b19cri"),R(t,"selected",l[11]===l[41])},m(h,k){C(h,t,k),S(t,e),S(t,_),c(),r||(g=Q(t,"click",s),r=!0)},p(h,k){l=h,k[0]&1024&&!P(e.src,n=l[38][0].data)&&d(e,"src",n),k[0]&1024&&i!==(i=l[38][1]||null)&&d(e,"title",i),k[0]&1024&&a!==(a=l[38][1]||null)&&d(e,"alt",a),o!==l[41]&&(u(),o=l[41],c()),k[0]&2048&&R(t,"selected",l[11]===l[41])},d(h){h&&E(t),u(),r=!1,g()}}}function ue(l){let t,e,n;return e=new Oe({props:{value:l[10],formatter:xe}}),e.$on("share",l[30]),e.$on("error",l[31]),{c(){t=z("div"),F(e.$$.fragment),d(t,"class","icon-button svelte-1b19cri")},m(i,a){C(i,t,a),L(e,t,null),n=!0},p(i,a){const _={};a[0]&1024&&(_.value=i[10]),e.$set(_)},i(i){n||(B(e.$$.fragment,i),n=!0)},o(i){I(e.$$.fragment,i),n=!1},d(i){i&&E(t),V(e)}}}function _e(l){let t,e=l[39]+"",n;return{c(){t=z("div"),n=ke(e),d(t,"class","caption-label 
svelte-1b19cri")},m(i,a){C(i,t,a),S(t,n)},p(i,a){a[0]&1024&&e!==(e=i[39]+"")&&ve(n,e)},d(i){i&&E(t)}}}function ce(l){let t,e,n,i,a,_,o,r,g=l[39]&&_e(l);function c(){return l[32](l[41])}return{c(){t=z("button"),e=z("img"),a=A(),g&&g.c(),_=A(),d(e,"alt",n=l[39]||""),P(e.src,i=typeof l[38]=="string"?l[38]:l[38].data)||d(e,"src",i),d(e,"class","svelte-1b19cri"),d(t,"class","thumbnail-item thumbnail-lg svelte-1b19cri"),R(t,"selected",l[11]===l[41])},m(u,s){C(u,t,s),S(t,e),S(t,a),g&&g.m(t,null),S(t,_),o||(r=Q(t,"click",c),o=!0)},p(u,s){l=u,s[0]&1024&&n!==(n=l[39]||"")&&d(e,"alt",n),s[0]&1024&&!P(e.src,i=typeof l[38]=="string"?l[38]:l[38].data)&&d(e,"src",i),l[39]?g?g.p(l,s):(g=_e(l),g.c(),g.m(t,_)):g&&(g.d(1),g=null),s[0]&2048&&R(t,"selected",l[11]===l[41])},d(u){u&&E(t),g&&g.d(),o=!1,r()}}}function ll(l){let t,e;return t=new je({}),{c(){F(t.$$.fragment)},m(n,i){L(t,n,i),e=!0},i(n){e||(B(t.$$.fragment,n),e=!0)},o(n){I(t.$$.fragment,n),e=!1},d(n){V(t,n)}}}function tl(l){let t,e,n,i,a,_,o;we(l[24]);let r=l[0]&&oe(l);const g=[el,$e],c=[];function u(s,h){return s[2]===null||s[10]===null||s[10].length===0?0:1}return e=u(l),n=c[e]=g[e](l),{c(){r&&r.c(),t=A(),n.c(),i=Fe()},m(s,h){r&&r.m(s,h),C(s,t,h),c[e].m(s,h),C(s,i,h),a=!0,_||(o=Q(Be,"resize",l[24]),_=!0)},p(s,h){s[0]?r?(r.p(s,h),h[0]&1&&B(r,1)):(r=oe(s),r.c(),B(r,1),r.m(t.parentNode,t)):r&&(U(),I(r,1,1,()=>{r=null}),W());let k=e;e=u(s),e===k?c[e].p(s,h):(U(),I(c[k],1,1,()=>{c[k]=null}),W(),n=c[e],n?n.p(s,h):(n=c[e]=g[e](s),n.c()),B(n,1),n.m(i.parentNode,i))},i(s){a||(B(r),B(n),a=!0)},o(s){I(r),I(n),a=!1},d(s){s&&(E(t),E(i)),r&&r.d(s),c[e].d(s),_=!1,o()}}}function nl(l){return typeof l=="object"&&l!==null&&"data"in l}function he(l){return nl(l)?l.data:typeof l=="string"?l:""}function il(l,t,e){let n,i,{show_label:a=!0}=t,{label:_}=t,{root:o=""}=t,{root_url:r=null}=t,{value:g=null}=t,{grid_cols:c=[2]}=t,{grid_rows:u=void 0}=t,{height:s="auto"}=t,{preview:h}=t,{allow_preview:k=!0}=t,{object_fit:X="cover"}=t,{show_share_button:v=!1}=t,{show_download_button:D=!1}=t;const H=Le();let j=!0,b=null,G=g,w=h&&g?.length?0:null,q=w;function Y(f){const T=f.target,y=f.clientX,p=T.offsetWidth/2;y<p?e(11,w=n):e(11,w=i)}function J(f){switch(f.code){case"Escape":f.preventDefault(),e(11,w=null);break;case"ArrowLeft":f.preventDefault(),e(11,w=n);break;case"ArrowRight":f.preventDefault(),e(11,w=i);break}}let m=[],M;async function De(f){if(typeof f!="number")return;await Xe(),m[f].focus();const{left:T,width:y}=M.getBoundingClientRect(),{left:$,width:p}=m[f].getBoundingClientRect(),ee=$-T+p/2-y/2+M.scrollLeft;M?.scrollTo({left:ee<0?0:ee,behavior:"smooth"})}let O=0,x=0;function Ge(){e(15,x=Be.innerHeight)}const Ie=()=>e(11,w=null),Se=f=>Y(f);function ze(f,T){le[f?"unshift":"push"](()=>{m[T]=f,e(12,m)})}const Ce=f=>e(11,w=f);function Ee(f){le[f?"unshift":"push"](()=>{M=f,e(13,M)})}function He(f){K.call(this,l,f)}function Ae(f){K.call(this,l,f)}const Ne=f=>e(11,w=f);function Re(){O=this.clientHeight,e(14,O)}return l.$$set=f=>{"show_label"in f&&e(0,a=f.show_label),"label"in f&&e(1,_=f.label),"root"in f&&e(18,o=f.root),"root_url"in f&&e(19,r=f.root_url),"value"in f&&e(2,g=f.value),"grid_cols"in f&&e(3,c=f.grid_cols),"grid_rows"in f&&e(4,u=f.grid_rows),"height"in f&&e(5,s=f.height),"preview"in f&&e(20,h=f.preview),"allow_preview"in f&&e(6,k=f.allow_preview),"object_fit"in f&&e(7,X=f.object_fit),"show_share_button"in f&&e(8,v=f.show_share_button),"show_download_button"in 
f&&e(9,D=f.show_download_button)},l.$$.update=()=>{l.$$.dirty[0]&2097156&&e(21,j=g==null||g.length==0?!0:j),l.$$.dirty[0]&786436&&e(10,b=g===null?null:g.map(f=>Array.isArray(f)?[te(f[0],o,r),f[1]]:[te(f,o,r),null])),l.$$.dirty[0]&7342084&&G!==g&&(j?(e(11,w=h&&g?.length?0:null),e(21,j=!1)):e(11,w=w!==null&&g!==null&&w<g.length?w:null),e(22,G=g)),l.$$.dirty[0]&3072&&(n=((w??0)+(b?.length??0)-1)%(b?.length??0)),l.$$.dirty[0]&3072&&(i=((w??0)+1)%(b?.length??0)),l.$$.dirty[0]&8391680&&w!==q&&(e(23,q=w),w!==null&&H("select",{index:w,value:b?.[w][1]})),l.$$.dirty[0]&2112&&k&&De(w)},[a,_,g,c,u,s,k,X,v,D,b,w,m,M,O,x,Y,J,o,r,h,j,G,q,Ge,Ie,Se,ze,Ce,Ee,He,Ae,Ne,Re]}class ol extends me{constructor(t){super(),ge(this,t,il,tl,be,{show_label:0,label:1,root:18,root_url:19,value:2,grid_cols:3,grid_rows:4,height:5,preview:20,allow_preview:6,object_fit:7,show_share_button:8,show_download_button:9},null,[-1,-1])}}function rl(l){let t,e,n,i;const a=[l[0]];let _={};for(let o=0;o<a.length;o+=1)_=Pe(_,a[o]);return t=new Te({props:_}),n=new ol({props:{label:l[2],value:l[8],show_label:l[1],root:l[3],root_url:l[4],grid_cols:l[12],grid_rows:l[13],height:l[14],preview:l[15],object_fit:l[17],allow_preview:l[16],show_share_button:l[18],show_download_button:l[19]}}),n.$on("select",l[20]),n.$on("share",l[21]),n.$on("error",l[22]),{c(){F(t.$$.fragment),e=A(),F(n.$$.fragment)},m(o,r){L(t,o,r),C(o,e,r),L(n,o,r),i=!0},p(o,r){const g=r&1?Ke(a,[Qe(o[0])]):{};t.$set(g);const c={};r&4&&(c.label=o[2]),r&256&&(c.value=o[8]),r&2&&(c.show_label=o[1]),r&8&&(c.root=o[3]),r&16&&(c.root_url=o[4]),r&4096&&(c.grid_cols=o[12]),r&8192&&(c.grid_rows=o[13]),r&16384&&(c.height=o[14]),r&32768&&(c.preview=o[15]),r&131072&&(c.object_fit=o[17]),r&65536&&(c.allow_preview=o[16]),r&262144&&(c.show_share_button=o[18]),r&524288&&(c.show_download_button=o[19]),n.$set(c)},i(o){i||(B(t.$$.fragment,o),B(n.$$.fragment,o),i=!0)},o(o){I(t.$$.fragment,o),I(n.$$.fragment,o),i=!1},d(o){o&&E(e),V(t,o),V(n,o)}}}function al(l){let t,e;return t=new Ue({props:{visible:l[7],variant:"solid",padding:!1,elem_id:l[5],elem_classes:l[6],container:l[9],scale:l[10],min_width:l[11],allow_overflow:!1,height:typeof l[14]=="number"?l[14]:void 0,$$slots:{default:[rl]},$$scope:{ctx:l}}}),{c(){F(t.$$.fragment)},m(n,i){L(t,n,i),e=!0},p(n,[i]){const a={};i&128&&(a.visible=n[7]),i&32&&(a.elem_id=n[5]),i&64&&(a.elem_classes=n[6]),i&512&&(a.container=n[9]),i&1024&&(a.scale=n[10]),i&2048&&(a.min_width=n[11]),i&16384&&(a.height=typeof n[14]=="number"?n[14]:void 0),i&9433375&&(a.$$scope={dirty:i,ctx:n}),t.$set(a)},i(n){e||(B(t.$$.fragment,n),e=!0)},o(n){I(t.$$.fragment,n),e=!1},d(n){V(t,n)}}}function sl(l,t,e){let{loading_status:n}=t,{show_label:i}=t,{label:a}=t,{root:_}=t,{root_url:o}=t,{elem_id:r=""}=t,{elem_classes:g=[]}=t,{visible:c=!0}=t,{value:u=null}=t,{container:s=!0}=t,{scale:h=null}=t,{min_width:k=void 0}=t,{grid_cols:X=[2]}=t,{grid_rows:v=void 0}=t,{height:D="auto"}=t,{preview:H}=t,{allow_preview:j=!0}=t,{object_fit:b="cover"}=t,{show_share_button:G=!1}=t,{show_download_button:w=!1}=t;function q(m){K.call(this,l,m)}function Y(m){K.call(this,l,m)}function J(m){K.call(this,l,m)}return l.$$set=m=>{"loading_status"in m&&e(0,n=m.loading_status),"show_label"in m&&e(1,i=m.show_label),"label"in m&&e(2,a=m.label),"root"in m&&e(3,_=m.root),"root_url"in m&&e(4,o=m.root_url),"elem_id"in m&&e(5,r=m.elem_id),"elem_classes"in m&&e(6,g=m.elem_classes),"visible"in m&&e(7,c=m.visible),"value"in m&&e(8,u=m.value),"container"in m&&e(9,s=m.container),"scale"in m&&e(10,h=m.scale),"min_width"in 
m&&e(11,k=m.min_width),"grid_cols"in m&&e(12,X=m.grid_cols),"grid_rows"in m&&e(13,v=m.grid_rows),"height"in m&&e(14,D=m.height),"preview"in m&&e(15,H=m.preview),"allow_preview"in m&&e(16,j=m.allow_preview),"object_fit"in m&&e(17,b=m.object_fit),"show_share_button"in m&&e(18,G=m.show_share_button),"show_download_button"in m&&e(19,w=m.show_download_button)},[n,i,a,_,o,r,g,c,u,s,h,k,X,v,D,H,j,b,G,w,q,Y,J]}class fl extends me{constructor(t){super(),ge(this,t,sl,al,be,{loading_status:0,show_label:1,label:2,root:3,root_url:4,elem_id:5,elem_classes:6,visible:7,value:8,container:9,scale:10,min_width:11,grid_cols:12,grid_rows:13,height:14,preview:15,allow_preview:16,object_fit:17,show_share_button:18,show_download_button:19})}}const jl=fl,Bl=["static"];export{jl as Component,Bl as modes}; -//# sourceMappingURL=index-5aed9d46.js.map diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/huggingface_hub/inference/_common.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/huggingface_hub/inference/_common.py deleted file mode 100644 index 73c3e61dd89913bf88c4604165f17e8665aa5df6..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/huggingface_hub/inference/_common.py +++ /dev/null @@ -1,289 +0,0 @@ -# coding=utf-8 -# Copyright 2023-present, the HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Contains utilities used by both the sync and async inference clients.""" -import base64 -import io -import json -import logging -from contextlib import contextmanager -from pathlib import Path -from typing import ( - TYPE_CHECKING, - Any, - AsyncIterable, - BinaryIO, - ContextManager, - Dict, - Generator, - Iterable, - List, - Optional, - Set, - Union, - overload, -) - -from requests import HTTPError - -from ..constants import ENDPOINT -from ..utils import ( - build_hf_headers, - get_session, - hf_raise_for_status, - is_aiohttp_available, - is_numpy_available, - is_pillow_available, -) -from ..utils._typing import Literal -from ._text_generation import ( - TextGenerationStreamResponse, -) - - -if TYPE_CHECKING: - from aiohttp import ClientResponse, ClientSession - from PIL import Image - -# TYPES -UrlT = str -PathT = Union[str, Path] -BinaryT = Union[bytes, BinaryIO] -ContentT = Union[BinaryT, PathT, UrlT] - -logger = logging.getLogger(__name__) - - -class InferenceTimeoutError(HTTPError, TimeoutError): - """Error raised when a model is unavailable or the request times out.""" - - -## IMPORT UTILS - - -def _import_aiohttp(): - # Make sure `aiohttp` is installed on the machine. 
- if not is_aiohttp_available(): - raise ImportError("Please install aiohttp to use `AsyncInferenceClient` (`pip install aiohttp`).") - import aiohttp - - return aiohttp - - -def _import_numpy(): - """Make sure `numpy` is installed on the machine.""" - if not is_numpy_available(): - raise ImportError("Please install numpy to use deal with embeddings (`pip install numpy`).") - import numpy - - return numpy - - -def _import_pil_image(): - """Make sure `PIL` is installed on the machine.""" - if not is_pillow_available(): - raise ImportError( - "Please install Pillow to use deal with images (`pip install Pillow`). If you don't want the image to be" - " post-processed, use `client.post(...)` and get the raw response from the server." - ) - from PIL import Image - - return Image - - -## RECOMMENDED MODELS - -# Will be globally fetched only once (see '_fetch_recommended_models') -_RECOMMENDED_MODELS: Optional[Dict[str, Optional[str]]] = None - - -def _get_recommended_model(task: str) -> str: - model = _fetch_recommended_models().get(task) - if model is None: - raise ValueError( - f"Task {task} has no recommended task. Please specify a model explicitly. Visit" - " https://huggingface.co/tasks for more info." - ) - logger.info( - f"Using recommended model {model} for task {task}. Note that it is encouraged to explicitly set" - f" `model='{model}'` as the recommended models list might get updated without prior notice." - ) - return model - - -def _fetch_recommended_models() -> Dict[str, Optional[str]]: - global _RECOMMENDED_MODELS - if _RECOMMENDED_MODELS is None: - response = get_session().get(f"{ENDPOINT}/api/tasks", headers=build_hf_headers()) - hf_raise_for_status(response) - _RECOMMENDED_MODELS = { - task: _first_or_none(details["widgetModels"]) for task, details in response.json().items() - } - return _RECOMMENDED_MODELS - - -def _first_or_none(items: List[Any]) -> Optional[Any]: - try: - return items[0] or None - except IndexError: - return None - - -## ENCODING / DECODING UTILS - - -@overload -def _open_as_binary(content: ContentT) -> ContextManager[BinaryT]: - ... # means "if input is not None, output is not None" - - -@overload -def _open_as_binary(content: Literal[None]) -> ContextManager[Literal[None]]: - ... # means "if input is None, output is None" - - -@contextmanager # type: ignore -def _open_as_binary(content: Optional[ContentT]) -> Generator[Optional[BinaryT], None, None]: - """Open `content` as a binary file, either from a URL, a local path, or raw bytes. - - Do nothing if `content` is None, - - TODO: handle a PIL.Image as input - TODO: handle base64 as input - """ - # If content is a string => must be either a URL or a path - if isinstance(content, str): - if content.startswith("https://") or content.startswith("http://"): - logger.debug(f"Downloading content from {content}") - yield get_session().get(content).content # TODO: retrieve as stream and pipe to post request ? - return - content = Path(content) - if not content.exists(): - raise FileNotFoundError( - f"File not found at {content}. If `data` is a string, it must either be a URL or a path to a local" - " file. To pass raw content, please encode it as bytes first." - ) - - # If content is a Path => open it - if isinstance(content, Path): - logger.debug(f"Opening content from {content}") - with content.open("rb") as f: - yield f - else: - # Otherwise: already a file-like object or None - yield content - - -def _b64_encode(content: ContentT) -> str: - """Encode a raw file (image, audio) into base64. 
Can be byes, an opened file, a path or a URL.""" - with _open_as_binary(content) as data: - data_as_bytes = data if isinstance(data, bytes) else data.read() - return base64.b64encode(data_as_bytes).decode() - - -def _b64_to_image(encoded_image: str) -> "Image": - """Parse a base64-encoded string into a PIL Image.""" - Image = _import_pil_image() - return Image.open(io.BytesIO(base64.b64decode(encoded_image))) - - -def _bytes_to_dict(content: bytes) -> "Image": - """Parse bytes from a Response object into a Python dictionary. - - Expects the response body to be encoded-JSON data. - """ - return json.loads(content.decode()) - - -def _bytes_to_image(content: bytes) -> "Image": - """Parse bytes from a Response object into a PIL Image. - - Expects the response body to be raw bytes. To deal with b64 encoded images, use `_b64_to_image` instead. - """ - Image = _import_pil_image() - return Image.open(io.BytesIO(content)) - - -## STREAMING UTILS - - -def _stream_text_generation_response( - bytes_output_as_lines: Iterable[bytes], details: bool -) -> Union[Iterable[str], Iterable[TextGenerationStreamResponse]]: - # Parse ServerSentEvents - for byte_payload in bytes_output_as_lines: - # Skip line - if byte_payload == b"\n": - continue - - payload = byte_payload.decode("utf-8") - - # Event data - if payload.startswith("data:"): - # Decode payload - json_payload = json.loads(payload.lstrip("data:").rstrip("/n")) - # Parse payload - output = TextGenerationStreamResponse(**json_payload) - yield output.token.text if not details else output - - -async def _async_stream_text_generation_response( - bytes_output_as_lines: AsyncIterable[bytes], details: bool -) -> Union[AsyncIterable[str], AsyncIterable[TextGenerationStreamResponse]]: - # Parse ServerSentEvents - async for byte_payload in bytes_output_as_lines: - # Skip line - if byte_payload == b"\n": - continue - - payload = byte_payload.decode("utf-8") - - # Event data - if payload.startswith("data:"): - # Decode payload - json_payload = json.loads(payload.lstrip("data:").rstrip("/n")) - # Parse payload - output = TextGenerationStreamResponse(**json_payload) - yield output.token.text if not details else output - - -async def _async_yield_from(client: "ClientSession", response: "ClientResponse") -> AsyncIterable[bytes]: - async for byte_payload in response.content: - yield byte_payload - await client.close() - - -# "TGI servers" are servers running with the `text-generation-inference` backend. -# This backend is the go-to solution to run large language models at scale. However, -# for some smaller models (e.g. "gpt2") the default `transformers` + `api-inference` -# solution is still in use. -# -# Both approaches have very similar APIs, but not exactly the same. What we do first in -# the `text_generation` method is to assume the model is served via TGI. If we realize -# it's not the case (i.e. we receive an HTTP 400 Bad Request), we fallback to the -# default API with a warning message. We remember for each model if it's a TGI server -# or not using `_NON_TGI_SERVERS` global variable. -# -# For more details, see https://github.com/huggingface/text-generation-inference and -# https://huggingface.co/docs/api-inference/detailed_parameters#text-generation-task. 
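The comment block above describes how the client detects TGI-backed endpoints at runtime: assume TGI first, and on an HTTP 400 Bad Request remember the model as non-TGI and retry with the plain api-inference payload. As a hedged illustration only — this is not the actual `text_generation` implementation, and the `post` callable below is a hypothetical stand-in for the real HTTP helper — the fallback pattern built on the two module-level helpers defined just below might look like this:

```python
from typing import Optional, Set

from requests import HTTPError

# The real helpers appear immediately below in the original module; they are
# repeated here only so this sketch is self-contained.
_NON_TGI_SERVERS: Set[Optional[str]] = set()


def _set_as_non_tgi(model: Optional[str]) -> None:
    _NON_TGI_SERVERS.add(model)


def _is_tgi_server(model: Optional[str]) -> bool:
    return model not in _NON_TGI_SERVERS


def generate_with_fallback(model: str, prompt: str, post, **tgi_params) -> str:
    """Try the TGI-style request first via the hypothetical `post` helper.

    If the server rejects the TGI-only parameters with an HTTP 400, remember
    that this model is not served by TGI and retry with the plain payload.
    """
    if _is_tgi_server(model):
        try:
            # TGI accepts richer parameters (e.g. details, best_of, watermark, ...).
            return post(model, {"inputs": prompt, "parameters": tgi_params})
        except HTTPError as err:
            if err.response is not None and err.response.status_code != 400:
                raise
            # 400 Bad Request: the extra TGI parameters were not understood.
            _set_as_non_tgi(model)
    # Fallback: plain `transformers` + api-inference payload without TGI extras.
    return post(model, {"inputs": prompt})
```

The per-model memo in `_NON_TGI_SERVERS` means the 400-and-retry cost is paid at most once per model within a process, which matches the behaviour the comment describes.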
- -_NON_TGI_SERVERS: Set[Optional[str]] = set() - - -def _set_as_non_tgi(model: Optional[str]) -> None: - _NON_TGI_SERVERS.add(model) - - -def _is_tgi_server(model: Optional[str]) -> bool: - return model not in _NON_TGI_SERVERS diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/importlib_resources/tests/zipdata01/__init__.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/importlib_resources/tests/zipdata01/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/deepghs/anime_object_detection/person.py b/spaces/deepghs/anime_object_detection/person.py deleted file mode 100644 index b9871d32bcc9725b5b4c5407f56e5bc8f6035259..0000000000000000000000000000000000000000 --- a/spaces/deepghs/anime_object_detection/person.py +++ /dev/null @@ -1,46 +0,0 @@ -from functools import lru_cache - -from huggingface_hub import hf_hub_download -from imgutils.data import ImageTyping, load_image, rgb_encode - -from onnx_ import _open_onnx_model -from plot import detection_visualize -from yolo_ import _image_preprocess, _data_postprocess - -_PERSON_MODELS = [ - 'person_detect_plus_v1.1_best_m.onnx', - 'person_detect_plus_v1.1_best_s.onnx', - 'person_detect_plus_v1.1_best_n.onnx', - 'person_detect_plus_best_m.onnx', - 'person_detect_best_m.onnx', - 'person_detect_best_x.onnx', - 'person_detect_best_s.onnx', -] -_DEFAULT_PERSON_MODEL = _PERSON_MODELS[0] - - -@lru_cache() -def _open_person_detect_model(model_name): - return _open_onnx_model(hf_hub_download( - 'deepghs/imgutils-models', - f'person_detect/{model_name}' - )) - - -_LABELS = ['person'] - - -def detect_person(image: ImageTyping, model_name: str, max_infer_size=640, - conf_threshold: float = 0.3, iou_threshold: float = 0.5): - image = load_image(image, mode='RGB') - new_image, old_size, new_size = _image_preprocess(image, max_infer_size) - - data = rgb_encode(new_image)[None, ...] - output, = _open_person_detect_model(model_name).run(['output0'], {'images': data}) - return _data_postprocess(output[0], conf_threshold, iou_threshold, old_size, new_size, _LABELS) - - -def _gr_detect_person(image: ImageTyping, model_name: str, max_infer_size=640, - conf_threshold: float = 0.3, iou_threshold: float = 0.5): - ret = detect_person(image, model_name, max_infer_size, conf_threshold, iou_threshold) - return detection_visualize(image, ret, _LABELS) diff --git a/spaces/deepwisdom/MetaGPT/metagpt/roles/researcher.py b/spaces/deepwisdom/MetaGPT/metagpt/roles/researcher.py deleted file mode 100644 index cb4d28c339ad05700d106afa04579bbeb31c1863..0000000000000000000000000000000000000000 --- a/spaces/deepwisdom/MetaGPT/metagpt/roles/researcher.py +++ /dev/null @@ -1,105 +0,0 @@ -#!/usr/bin/env python -""" -@Modified By: mashenquan, 2023/8/22. A definition has been provided for the return value of _think: returning false indicates that further reasoning cannot continue. 
- -""" - -import asyncio - -from pydantic import BaseModel - -from metagpt.actions import CollectLinks, ConductResearch, WebBrowseAndSummarize -from metagpt.actions.research import get_research_system_text -from metagpt.const import RESEARCH_PATH -from metagpt.logs import logger -from metagpt.roles import Role -from metagpt.schema import Message - - -class Report(BaseModel): - topic: str - links: dict[str, list[str]] = None - summaries: list[tuple[str, str]] = None - content: str = "" - - -class Researcher(Role): - def __init__( - self, - name: str = "David", - profile: str = "Researcher", - goal: str = "Gather information and conduct research", - constraints: str = "Ensure accuracy and relevance of information", - language: str = "en-us", - **kwargs, - ): - super().__init__(name, profile, goal, constraints, **kwargs) - self._init_actions([CollectLinks(name), WebBrowseAndSummarize(name), ConductResearch(name)]) - self.language = language - if language not in ("en-us", "zh-cn"): - logger.warning(f"The language `{language}` has not been tested, it may not work.") - - async def _think(self) -> bool: - if self._rc.todo is None: - self._set_state(0) - return True - - if self._rc.state + 1 < len(self._states): - self._set_state(self._rc.state + 1) - else: - self._rc.todo = None - return False - - async def _act(self) -> Message: - logger.info(f"{self._setting}: ready to {self._rc.todo}") - todo = self._rc.todo - msg = self._rc.memory.get(k=1)[0] - if isinstance(msg.instruct_content, Report): - instruct_content = msg.instruct_content - topic = instruct_content.topic - else: - topic = msg.content - - research_system_text = get_research_system_text(topic, self.language) - if isinstance(todo, CollectLinks): - links = await todo.run(topic, 4, 4) - ret = Message("", Report(topic=topic, links=links), role=self.profile, cause_by=type(todo)) - elif isinstance(todo, WebBrowseAndSummarize): - links = instruct_content.links - todos = (todo.run(*url, query=query, system_text=research_system_text) for (query, url) in links.items()) - summaries = await asyncio.gather(*todos) - summaries = list((url, summary) for i in summaries for (url, summary) in i.items() if summary) - ret = Message("", Report(topic=topic, summaries=summaries), role=self.profile, cause_by=type(todo)) - else: - summaries = instruct_content.summaries - summary_text = "\n---\n".join(f"url: {url}\nsummary: {summary}" for (url, summary) in summaries) - content = await self._rc.todo.run(topic, summary_text, system_text=research_system_text) - ret = Message("", Report(topic=topic, content=content), role=self.profile, cause_by=type(self._rc.todo)) - self._rc.memory.add(ret) - return ret - - async def _react(self) -> Message: - while True: - await self._think() - if self._rc.todo is None: - break - msg = await self._act() - report = msg.instruct_content - self.write_report(report.topic, report.content) - return msg - - def write_report(self, topic: str, content: str): - if not RESEARCH_PATH.exists(): - RESEARCH_PATH.mkdir(parents=True) - filepath = RESEARCH_PATH / f"{topic}.md" - filepath.write_text(content) - - -if __name__ == "__main__": - import fire - - async def main(topic: str, language="en-us"): - role = Researcher(topic, language=language) - await role.run(topic) - - fire.Fire(main) diff --git a/spaces/deprem-ml/deprem_satellite_test/utils/istanbul_unet.py b/spaces/deprem-ml/deprem_satellite_test/utils/istanbul_unet.py deleted file mode 100644 index 85a35d4f92c91d6b1032ef3982a16c6f68b5d6fd..0000000000000000000000000000000000000000 --- 
a/spaces/deprem-ml/deprem_satellite_test/utils/istanbul_unet.py +++ /dev/null @@ -1,21 +0,0 @@ -from utils.download import attempt_download_from_hub -import segmentation_models_pytorch as smp -from utils.dataloader import * -import torch - - -def unet_prediction(input_path, model_path): - model_path = attempt_download_from_hub(model_path) - best_model = torch.load(model_path) - preprocessing_fn = smp.encoders.get_preprocessing_fn('efficientnet-b6', 'imagenet') - - test_dataset = Dataset(input_path, augmentation=get_validation_augmentation(), preprocessing=get_preprocessing(preprocessing_fn)) - image = test_dataset.get() - - x_tensor = torch.from_numpy(image).to("cuda").unsqueeze(0) - pr_mask = best_model.predict(x_tensor) - pr_mask = (pr_mask.squeeze().cpu().numpy().round())*255 - - # Save the predicted mask - cv2.imwrite("output.png", pr_mask) - return 'output.png' \ No newline at end of file diff --git a/spaces/dfyinc/GeniusChat/app.py b/spaces/dfyinc/GeniusChat/app.py deleted file mode 100644 index 74e2534a2e0a42bd595e53bdb02c497c2fd30725..0000000000000000000000000000000000000000 --- a/spaces/dfyinc/GeniusChat/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/microsoft/DialoGPT-large").launch() \ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/(2011) Origami Ryujin 1.2 Diagram Satoshi.pdf MAXSPEED.md b/spaces/diacanFperku/AutoGPT/(2011) Origami Ryujin 1.2 Diagram Satoshi.pdf MAXSPEED.md deleted file mode 100644 index 38cb8ddad67eb144ccc5106fdb1b3a6d116ab113..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/(2011) Origami Ryujin 1.2 Diagram Satoshi.pdf MAXSPEED.md +++ /dev/null @@ -1,90 +0,0 @@ -<br /> -<h1>(2011) Origami Ryujin 1.2 Diagram Satoshi.pdf MAXSPEED</h1> -<p>If you are a fan of origami, the art of paper folding, you might have heard of Ryujin, the mythical dragon that is considered one of the most complex and challenging origami models ever created. Ryujin was designed by Satoshi Kamiya, a Japanese origami master who is known for his intricate and realistic designs. Ryujin has several versions, each with different levels of difficulty and detail. One of the most popular versions is Ryujin 1.2, which was published in 2011 and features a dragon with scales, claws, horns, whiskers, and a long tail.</p> -<h2>(2011) Origami Ryujin 1.2 Diagram Satoshi.pdf MAXSPEED</h2><br /><p><b><b>Download Zip</b> ::: <a href="https://gohhs.com/2uFTGT">https://gohhs.com/2uFTGT</a></b></p><br /><br /> -<h2>What is Ryujin 1.2?</h2> -<p>Ryujin 1.2 is an origami model that represents a dragon with a body length of about 3 meters and a wingspan of about 4 meters. It is made from a single sheet of paper measuring 3.6 x 3.6 meters, without any cuts or glue. It has about 1000 scales on its body, which are folded individually using a technique called tessellation. It also has many other details, such as claws, horns, whiskers, eyes, teeth, and a long tail with spikes.</p> -<h2>How to Fold Ryujin 1.2?</h2> -<p>Folding Ryujin 1.2 is not an easy task. It requires a lot of patience, skill, and time. It can take several months or even years to complete this model. You also need a large sheet of paper that is strong and thin enough to handle the multiple layers and folds. You can use any type of paper that suits your preference, such as tissue foil, elephant hide, or kraft paper.</p> -<p>To fold Ryujin 1.2, you need to follow the diagrams that are provided by Satoshi Kamiya in his book <em>Works of Satoshi Kamiya 1995-2003</em>. 
The book contains 275 pages of diagrams that show every step of the folding process in detail. You can also find some videos on YouTube that show how to fold some parts of Ryujin 1.2, such as the scales or the head.</p> -<h2>Where to Download (2011) Origami Ryujin 1.2 Diagram Satoshi.pdf MAXSPEED?</h2> -<p>If you want to download (2011) Origami Ryujin 1.2 Diagram Satoshi.pdf MAXSPEED, you can visit some of the websites that offer this service. For example, you can go to <a href="https://drive.google.com/file/d/1voXgwFz2GV8yfiYepCmAtf4W5d4CUPU2/view">Google Drive</a>, where you can find the PDF file that contains the diagrams for Ryujin 1.2 and other models by Satoshi Kamiya. You can also go to <a href="https://soundcloud.com/policixan1978/2011-origami-ryujin-12-diagram-satoshipdf-maxspeed">SoundCloud</a>, where you can listen to a podcast that discusses the history and features of Ryujin 1.2.</p> -<h2>Conclusion</h2> -<p>(2011) Origami Ryujin 1.2 Diagram Satoshi.pdf MAXSPEED is a valuable resource for anyone who wants to challenge themselves with one of the most complex and impressive origami models ever created. It contains the diagrams that show how to fold Ryujin 1.2, a dragon with amazing details and realism. You can download this PDF file from various online sources and enjoy folding this masterpiece at your own pace.</p> -<p></p> -<h2>What are the Tips and Tricks for Folding Ryujin 1.2?</h2> -<p>Folding Ryujin 1.2 can be a daunting and frustrating task, especially for beginners or intermediate folders. However, there are some tips and tricks that can help you make the process easier and more enjoyable. Here are some of them:</p> -<ul> -<li>Use a large and flat surface to fold your paper, such as a table or a floor.</li> -<li>Use a ruler, a bone folder, or a fingernail to make sharp and precise creases.</li> -<li>Use a pencil or a pen to mark the reference points and lines on your paper.</li> -<li>Use a pair of tweezers or a chopstick to manipulate the small and delicate parts of the model.</li> -<li>Use some clips or pins to hold the layers of paper together.</li> -<li>Use some glue or tape to reinforce the weak or loose parts of the model.</li> -<li>Use some water or starch to shape and mold the model into its final form.</li> -</ul> -<h2>What are the Benefits of Folding Ryujin 1.2?</h2> -<p>Folding Ryujin 1.2 is not only a hobby or a pastime, but also a beneficial activity that can improve your physical, mental, and emotional well-being. Some of the benefits of folding Ryujin 1.2 are:</p> -<ul> -<li>It improves your hand-eye coordination, fine motor skills, and spatial awareness.</li> -<li>It enhances your concentration, memory, logic, and problem-solving skills.</li> -<li>It stimulates your creativity, imagination, and artistic expression.</li> -<li>It reduces your stress, anxiety, and boredom.</li> -<li>It boosts your confidence, self-esteem, and satisfaction.</li> -<li>It fosters your curiosity, learning, and appreciation for different cultures and traditions.</li> -</ul> -<h2>Where to Share Your Ryujin 1.2?</h2> -<p>If you have successfully folded Ryujin 1.2, you might want to share your achievement and masterpiece with others who share your passion and interest for origami. There are many platforms and communities where you can showcase your Ryujin 1.2 and interact with other origami enthusiasts. 
For example, you can:</p> -<ul> -<li>Post your photos or videos of your Ryujin 1.2 on social media platforms such as Facebook, Instagram, Twitter, or YouTube.</li> -<li>Join online forums or groups such as Reddit, Quora, or Origami Forum where you can discuss or ask questions about Ryujin 1.2 or other origami models.</li> -<li>Submit your Ryujin 1.2 to online galleries or databases such as Flickr, Origami Database, or Origami Resource Center where you can browse or search for other origami models.</li> -<li>Participate in online contests or challenges such as Origami Challenge or Origami Contest where you can compete or vote for the best origami models.</li> -<li>Attend offline events or workshops such as Origami Convention or Origami Workshop where you can meet or learn from other origami experts or masters.</li> -</ul> -<h2>What are the Differences Between Ryujin 1.2 and Other Versions of Ryujin?</h2> -<p>Ryujin is a series of origami models that depict different versions of the dragon with varying degrees of difficulty and detail. The first version of Ryujin was Ryujin 1.0, which was published in 2003 and featured a dragon with a simple body and head. The second version was Ryujin 1.2, which was published in 2011 and improved the design of the body and head, adding scales, claws, horns, whiskers, and a long tail. The third version was Ryujin 2.0, which was published in 2015 and added wings to the dragon, making it more realistic and majestic. The fourth version was Ryujin 3.5, which was published in 2019 and refined the design of the wings, making them more flexible and expressive.</p> -<h2>What are the Similarities Between Ryujin 1.2 and Other Models by Satoshi Kamiya?</h2> -<p>Ryujin 1.2 is one of the many models that Satoshi Kamiya has designed and created over the years. Satoshi Kamiya is a renowned origami artist who specializes in complex and realistic models of animals, mythical creatures, insects, plants, and more. Some of his most famous models include Bahamut, Ancient Dragon, Phoenix, Hercules Beetle, Wizard, Violinist, and more. Ryujin 1.2 shares some similarities with other models by Satoshi Kamiya, such as:</p> -<ul> -<li>They are made from a single sheet of paper without any cuts or glue.</li> -<li>They have a high level of difficulty and detail that require advanced folding skills and techniques.</li> -<li>They have a realistic and expressive appearance that capture the essence and personality of the subject.</li> -<li>They have a unique and original design that showcases the creativity and innovation of the artist.</li> -</ul> -<h2>How to Appreciate Ryujin 1.2?</h2> -<p>Ryujin 1.2 is more than just an origami model. It is a masterpiece of art and engineering that reflects the passion and dedication of its creator and its folder. To appreciate Ryujin 1.2, you can:</p> -<ul> -<li>Admire its beauty and complexity from different angles and perspectives.</li> -<li>Study its structure and geometry and learn how each fold contributes to the overall shape and form.</li> -<li>Explore its history and culture and learn how it relates to the legend and symbolism of the dragon.</li> -<li>Share your experience and feedback with others who appreciate origami and Ryujin 1.2.</li> -</ul> -<h2>What are the Challenges and Rewards of Folding Ryujin 1.2?</h2> -<p>Folding Ryujin 1.2 is not a task for the faint-hearted. It requires a lot of perseverance, determination, and courage to face the challenges and difficulties that come along the way. 
Some of the challenges are:</p> -<ul> -<li>The size and weight of the paper, which can be hard to handle and fold.</li> -<li>The number and complexity of the steps, which can be confusing and overwhelming.</li> -<li>The accuracy and precision of the creases, which can affect the final result and appearance.</li> -<li>The time and effort involved, which can be exhausting and frustrating.</li> -</ul> -<p>However, folding Ryujin 1.2 also brings a lot of rewards and satisfaction that make the journey worthwhile. Some of the rewards are:</p> -<ul> -<li>The sense of achievement and accomplishment that comes from completing one of the most difficult origami models ever.</li> -<li>The joy and pride that comes from creating a beautiful and impressive piece of art with your own hands.</li> -<li>The knowledge and skill that comes from learning new and advanced folding techniques and methods.</li> -<li>The fun and excitement that comes from overcoming the challenges and difficulties with your own creativity and ingenuity.</li> -</ul> -<h2>What are the Resources and References for Folding Ryujin 1.2?</h2> -<p>If you want to fold Ryujin 1.2, you will need some resources and references that can help you along the way. Some of the resources and references are:</p> -<ul> -<li>The book <em>Works of Satoshi Kamiya 1995-2003</em>, which contains the diagrams for Ryujin 1.2 and other models by Satoshi Kamiya.</li> -<li>The PDF file (2011) Origami Ryujin 1.2 Diagram Satoshi.pdf MAXSPEED, which you can download from various online sources.</li> -<li>The videos on YouTube that show how to fold some parts of Ryujin 1.2, such as the scales or the head.</li> -<li>The websites or blogs that share tips or tricks for folding Ryujin 1.2, such as Origami Art or Origami Blog.</li> -<li>The forums or groups that discuss or share information or opinions about Ryujin 1.2 or other origami models, such as Reddit, Quora, or Origami Forum.</li> -</ul> -<h2>Conclusion</h2> -<p>(2011) Origami Ryujin 1.2 Diagram Satoshi.pdf MAXSPEED is a useful and amazing resource for anyone who wants to fold Ryujin 1.2, one of the most complex and impressive origami models ever created. It contains the diagrams that show how to fold Ryujin 1.2, a dragon with amazing details and realism. You can download this PDF file from various online sources and enjoy folding this masterpiece at your own pace. By folding Ryujin 1.2, you will be able to improve your origami skills and knowledge, as well as your physical, mental, and emotional well-being.</p> 3cee63e6c2<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/Digital Zone- Counter-Strike Source V18 Full Hack Tool Download.md b/spaces/diacanFperku/AutoGPT/Digital Zone- Counter-Strike Source V18 Full Hack Tool Download.md deleted file mode 100644 index 5020b877e3bf8f6ea3e7c26ea31aa3bbd7dcccdb..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Digital Zone- Counter-Strike Source V18 Full Hack Tool Download.md +++ /dev/null @@ -1,38 +0,0 @@ -<h2>Digital Zone- Counter-Strike Source V18 Full Hack Tool Download</h2><br /><p><b><b>Download</b> · <a href="https://gohhs.com/2uFTgV">https://gohhs.com/2uFTgV</a></b></p><br /><br /> -<br /> -Counter Strike Source Engine Development Package $1,100,000 USD. Includes Game and Script - $400,000 - Developer & Publisher: Electronic Arts SKU: Counter-Strike: Global Offensive Main Features: Digital Download Purchase includes full game and script + design notes. 
Includes Source - -COD 4 digital edition vs COD4 manual: Equip yourself with COD 4, a new and re-designed series with revolutionary gameplay and new weapons and multiplayer modes! Let's check our top 10 secrets to dominate COD 4 - -Best Counter-Strike Source mods! Best Counter-Strike Source hacks! Best Counter-Strike Source cheats! Best Counter-Strike Source tools! Best Counter-Strike Source maps! Best Counter-Strike Source tweaks! Best Counter-Strike Source videos! Best Counter-Strike Source walkthroughs! Best Counter-Strike Source guides! Download it today! - - Use this COD 4 source hack tool to get free experience and unlimited money during multiplayer modes. Just download the software, install it and follow the steps in the manual to obtain the full COD 4 hack tool.Q: - -How to remove a chainring from a derailleur? - -I have a Kona Fu-Lyte. - -I was replacing some front derailleur cable and found that I have a worn out chainring on my rear cluster. - -How do I remove this chainring from the rear cluster? - -A: - -If it is a "brakeless" derailleur, you have to remove the freehub first. - -You don't have to remove the freehub, but you do have to remove the shift cage. - -You can see the type of shift cage and freehub in this picture. - -Now, you need to remove the entire shifter and pull it off. - -Finally, you need to pull the chain ring off of the freehub. To do this, you need to remove the grease fittings on the freehub, and then you can pull the chain ring off with a little help from a small tool. - -You can follow the instructions in this article. - -Bookmark This Web Site - -Shipping Container Homes And 4fefd39f24<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/diacanFperku/AutoGPT/Oxford Student Atlas For India Pdf Download.md b/spaces/diacanFperku/AutoGPT/Oxford Student Atlas For India Pdf Download.md deleted file mode 100644 index 8ca015138080e255d812d7500f1f3df55ec2049c..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Oxford Student Atlas For India Pdf Download.md +++ /dev/null @@ -1,98 +0,0 @@ - -<h1>Oxford Student Atlas For India Pdf Download: A Review</h1> -<p>If you are looking for a reliable and comprehensive source of geographic information on India and the world, you might want to consider downloading the Oxford Student Atlas for India. This atlas is designed for students preparing for competitive exams conducted by the UPSC, State Public Service Commissions, and other examining bodies. It is also a useful reference for anyone interested in learning more about the physical and political features of India and the world.</p> -<h2>What is the Oxford Student Atlas for India?</h2> -<p>The Oxford Student Atlas for India is a product of Oxford University Press, a renowned publisher of academic books and journals. It is the fourth edition of the atlas, which was first published in 2009. The atlas uses state-of-the-art techniques to produce maps that are accurate and easy to read. It contains a variety of maps, charts, and graphs that provide geographic information on the world, continents, oceans, and the regions and countries of the world. 
It also includes up-to-date information on various global issues, such as climate change, environmental degradation, population growth, etc.</p> -<h2>Oxford Student Atlas For India Pdf Download</h2><br /><p><b><b>DOWNLOAD</b> — <a href="https://gohhs.com/2uFVhj">https://gohhs.com/2uFVhj</a></b></p><br /><br /> -<h2>What are the features of the Oxford Student Atlas for India?</h2> -<p>The Oxford Student Atlas for India has many features that make it a valuable resource for students and learners. Some of these features are:</p> -<ul> -<li>Special exam-oriented features: The atlas contains 270+ map-based questions on the pattern of the UPSC, State Public Service Commissions, and other competitive exams. The answers are provided in the Oxford Areal app, which can be downloaded for free by scanning the cover of the atlas.</li> -<li>Separate physical and political maps of India and the continents: The atlas provides detailed and clear maps of India and the continents, showing their physical and political features, such as rivers, mountains, boundaries, capitals, etc.</li> -<li>Latest socio-economic maps and data: The atlas covers topical themes such as biosphere reserves, wetlands, power projects, space exploration, etc. It also reflects recent administrative changes in India and the world. The atlas provides latest socio-economic maps and data culled from authoritative sources.</li> -<li>Special sections on the history of map-making and concepts of contour and landforms: The atlas includes informative sections on how maps are made and how to interpret them. It also explains the concepts of contour and landforms with examples and illustrations.</li> -<li>Symbols for each place name from India and the world according to population range: The atlas uses different symbols to indicate the population range of each place name from India and the world. This helps to compare and contrast the population density of different regions.</li> -<li>Fully updated and revised index: The atlas contains a comprehensive index that lists all the place names from India and the world along with their coordinates.</li> -<li>Oxford Areal app: The atlas comes with an interactive digital resource that can be accessed by downloading the free Oxford Areal app and scanning the cover of the atlas. The app provides additional exercises, reference maps, learning resources, animations, videos, quizzes, games, statistics, etc.</li> -</ul> -<h2>How to download the Oxford Student Atlas for India?</h2> -<p>The Oxford Student Atlas for India can be downloaded as a PDF file from various online platforms. Some of these platforms are:</p> -<ul> -<li>Oxford University Press website: You can visit https://india.oup.com/product/oxford-student-atlas-for-india-9789391050849 and purchase the PDF version of the atlas for 357.14 INR.</li> -<li>Scribd website: You can visit https://www.scribd.com/doc/255558611/Oxford-Student-Atlas-for-India-2nd-Edition and download the PDF version of the second edition of the atlas for free.</li> -</ul> -<h2>Conclusion</h2> -<p>The Oxford Student Atlas for India is a must-have resource for anyone who wants to learn more about geography or prepare for competitive exams. It provides accurate and up-to-date information on India and the world in an engaging and interactive way. 
You can download it as a PDF file from various online platforms or buy it as a paperback book from any bookstore.</p> -<h2>Why should you download the Oxford Student Atlas for India?</h2> -<p>The Oxford Student Atlas for India is not just a collection of maps, but a valuable tool for learning and understanding geography. It can help you to:</p> -<ul> -<li>Improve your general knowledge and awareness of India and the world.</li> -<li>Prepare for various competitive exams that test your geographic skills and knowledge.</li> -<li>Develop your analytical and critical thinking skills by interpreting maps, charts, and graphs.</li> -<li>Enhance your curiosity and interest in exploring different regions and cultures of the world.</li> -<li>Enjoy the beauty and diversity of the natural and human-made features of the world.</li> -</ul> -<h2>How to use the Oxford Student Atlas for India?</h2> -<p>The Oxford Student Atlas for India is easy to use and navigate. You can follow these steps to make the most of it:</p> -<ol> -<li>Choose a topic or theme that you want to learn more about, such as climate, agriculture, history, etc.</li> -<li>Find the relevant map, chart, or graph in the atlas that covers that topic or theme.</li> -<li>Read the title, legend, scale, and other information given on the map, chart, or graph.</li> -<li>Observe the features, patterns, trends, and relationships shown on the map, chart, or graph.</li> -<li>Compare and contrast different regions or countries based on the map, chart, or graph.</li> -<li>Answer the questions given in the atlas or in the Oxford Areal app related to the map, chart, or graph.</li> -<li>Check your answers in the app or in the answer key given at the end of the atlas.</li> -</ol> -<h2>What are some tips and tricks for downloading the Oxford Student Atlas for India?</h2> -<p>If you want to download the Oxford Student Atlas for India as a PDF file, you might want to keep these tips and tricks in mind:</p> -<p></p> -<ul> -<li>Make sure you have a stable internet connection and enough storage space on your device.</li> -<li>Choose a reliable and secure online platform that offers the PDF version of the atlas.</li> -<li>Check the price, edition, format, size, and quality of the PDF file before downloading it.</li> -<li>Use a compatible PDF reader or viewer to open and view the PDF file on your device.</li> -<li>Save or bookmark the PDF file for future reference or offline access.</li> -</ul> -<h2>What are some benefits of downloading the Oxford Student Atlas for India?</h2> -<p>Downloading the Oxford Student Atlas for India as a PDF file has many benefits over buying it as a paperback book. Some of these benefits are:</p> -<ul> -<li>It is cheaper and more convenient than buying a physical copy of the atlas.</li> -<li>It is easier to store and access on your device than carrying a bulky book around.</li> -<li>It is more eco-friendly and saves paper and ink.</li> -<li>It allows you to zoom in and out of the maps, charts, and graphs for better clarity and detail.</li> -<li>It enables you to search for any place name or topic within the PDF file using keywords.</li> -<li>It lets you share the PDF file with others via email or social media.</li> -</ul> -<h2>What are some challenges of downloading the Oxford Student Atlas for India?</h2> -<p>Downloading the Oxford Student Atlas for India as a PDF file also has some challenges that you should be aware of. 
Some of these challenges are:</p> -<ul> -<li>It requires a compatible device and software to open and view the PDF file.</li> -<li>It may not have the same quality and resolution as the printed version of the atlas.</li> -<li>It may not be updated as frequently as the printed version of the atlas.</li> -<li>It may not have all the features and resources that are available in the Oxford Areal app.</li> -<li>It may be subject to piracy and plagiarism if not downloaded from a legitimate source.</li> -</ul> -<h2>How to overcome the challenges of downloading the Oxford Student Atlas for India?</h2> -<p>If you want to overcome the challenges of downloading the Oxford Student Atlas for India as a PDF file, you can follow these tips:</p> -<ul> -<li>Make sure you have a device that can support PDF files, such as a laptop, tablet, or smartphone.</li> -<li>Download a reliable and secure PDF reader or viewer, such as Adobe Acrobat Reader, Foxit Reader, or Google Chrome.</li> -<li>Download the PDF file from a trusted and verified online platform, such as Oxford University Press website or Scribd website.</li> -<li>Check for updates and revisions of the PDF file regularly and download them if available.</li> -<li>Use the Oxford Areal app along with the PDF file to access additional features and resources.</li> -<li>Avoid sharing or copying the PDF file without proper permission or citation.</li> -</ul> -<h2>What are some alternatives to downloading the Oxford Student Atlas for India?</h2> -<p>If you are not able to download the Oxford Student Atlas for India as a PDF file, or if you prefer other formats or sources of geographic information, you can try some of these alternatives:</p> -<ul> -<li>Buy the paperback version of the atlas from any bookstore or online platform. The paperback version has the same content and features as the PDF version, plus a durable and attractive cover.</li> -<li>Access the online version of the atlas on the Oxford University Press website. The online version has interactive maps, charts, and graphs that you can zoom in and out, pan, and rotate. You can also print or download any map, chart, or graph from the online version.</li> -<li>Use other online platforms or apps that provide geographic information on India and the world, such as Google Maps, Google Earth, National Geographic, etc. These platforms or apps have different features and functions that can help you explore and learn geography.</li> -<li>Refer to other books or atlases that cover geography or related topics, such as The Times Comprehensive Atlas of the World, The World Atlas of Coffee, The Atlas of Beauty, etc. These books or atlases have different perspectives and themes that can enrich your geographic knowledge.</li> -</ul> -<h2>Conclusion</h2> -<p>The Oxford Student Atlas for India is a great resource for anyone who wants to learn more about geography or prepare for competitive exams. It provides accurate and up-to-date information on India and the world in an engaging and interactive way. You can download it as a PDF file from various online platforms or buy it as a paperback book from any bookstore. You can also use other alternatives to access geographic information on India and the world. Whatever format or source you choose, you will surely enjoy and benefit from the Oxford Student Atlas for India.</p> -<h2>Conclusion</h2> -<p>The Oxford Student Atlas for India is a great resource for anyone who wants to learn more about geography or prepare for competitive exams. 
It provides accurate and up-to-date information on India and the world in an engaging and interactive way. You can download it as a PDF file from various online platforms or buy it as a paperback book from any bookstore. You can also use other alternatives to access geographic information on India and the world. Whatever format or source you choose, you will surely enjoy and benefit from the Oxford Student Atlas for India.</p> 3cee63e6c2<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/Paheli 720p In Dual Audio Hindi.md b/spaces/diacanFperku/AutoGPT/Paheli 720p In Dual Audio Hindi.md deleted file mode 100644 index df14c3a5ec06b3ccd582b713c2fe43704e4286b3..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Paheli 720p In Dual Audio Hindi.md +++ /dev/null @@ -1,11 +0,0 @@ -<h2>Paheli 720p In Dual Audio Hindi</h2><br /><p><b><b>Download File</b> ↔ <a href="https://gohhs.com/2uFTKo">https://gohhs.com/2uFTKo</a></b></p><br /><br /> - -Thaandavam (2012) 720p BRRip x264 Eng Subs [Dual Audio] [Hindi 2.0 . Ek Paheli Leela (2015) Hindi 1GB 720p DVDScrRip x264 Team DDH~RG. Round of 16. -LeeLoo's 1st Season. -Bengali Action Movies. -LeeLoo's 1st Season.Bengali Action Movies. -Kishore Kumar, Nadeem Mishra, Ramesh Mishra and others - LeeLoo's 1st Season. -Kishore Kumar, Nadeem Mishra, Ramesh Mishra and others - 8a78ff9644<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/diffusers/stable-diffusion-xl-inpainting/README.md b/spaces/diffusers/stable-diffusion-xl-inpainting/README.md deleted file mode 100644 index c044e3a5ef1f6216787e6eb0ef59aafecff8baa5..0000000000000000000000000000000000000000 --- a/spaces/diffusers/stable-diffusion-xl-inpainting/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: SDXL Inpainting -emoji: 🔥 -colorFrom: green -colorTo: pink -sdk: gradio -sdk_version: 3.41.2 -app_file: app.py -pinned: false -duplicated_from: runwayml/stable-diffusion-inpainting ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference - diff --git a/spaces/digitalxingtong/Bufeiyan-c-Bert-VITS2/text/tone_sandhi.py b/spaces/digitalxingtong/Bufeiyan-c-Bert-VITS2/text/tone_sandhi.py deleted file mode 100644 index 0f45b7a72c5d858bcaab19ac85cfa686bf9a74da..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Bufeiyan-c-Bert-VITS2/text/tone_sandhi.py +++ /dev/null @@ -1,351 +0,0 @@ -# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from typing import List -from typing import Tuple - -import jieba -from pypinyin import lazy_pinyin -from pypinyin import Style - - -class ToneSandhi(): - def __init__(self): - self.must_neural_tone_words = { - '麻烦', '麻利', '鸳鸯', '高粱', '骨头', '骆驼', '马虎', '首饰', '馒头', '馄饨', '风筝', - '难为', '队伍', '阔气', '闺女', '门道', '锄头', '铺盖', '铃铛', '铁匠', '钥匙', '里脊', - '里头', '部分', '那么', '道士', '造化', '迷糊', '连累', '这么', '这个', '运气', '过去', - '软和', '转悠', '踏实', '跳蚤', '跟头', '趔趄', '财主', '豆腐', '讲究', '记性', '记号', - '认识', '规矩', '见识', '裁缝', '补丁', '衣裳', '衣服', '衙门', '街坊', '行李', '行当', - '蛤蟆', '蘑菇', '薄荷', '葫芦', '葡萄', '萝卜', '荸荠', '苗条', '苗头', '苍蝇', '芝麻', - '舒服', '舒坦', '舌头', '自在', '膏药', '脾气', '脑袋', '脊梁', '能耐', '胳膊', '胭脂', - '胡萝', '胡琴', '胡同', '聪明', '耽误', '耽搁', '耷拉', '耳朵', '老爷', '老实', '老婆', - '老头', '老太', '翻腾', '罗嗦', '罐头', '编辑', '结实', '红火', '累赘', '糨糊', '糊涂', - '精神', '粮食', '簸箕', '篱笆', '算计', '算盘', '答应', '笤帚', '笑语', '笑话', '窟窿', - '窝囊', '窗户', '稳当', '稀罕', '称呼', '秧歌', '秀气', '秀才', '福气', '祖宗', '砚台', - '码头', '石榴', '石头', '石匠', '知识', '眼睛', '眯缝', '眨巴', '眉毛', '相声', '盘算', - '白净', '痢疾', '痛快', '疟疾', '疙瘩', '疏忽', '畜生', '生意', '甘蔗', '琵琶', '琢磨', - '琉璃', '玻璃', '玫瑰', '玄乎', '狐狸', '状元', '特务', '牲口', '牙碜', '牌楼', '爽快', - '爱人', '热闹', '烧饼', '烟筒', '烂糊', '点心', '炊帚', '灯笼', '火候', '漂亮', '滑溜', - '溜达', '温和', '清楚', '消息', '浪头', '活泼', '比方', '正经', '欺负', '模糊', '槟榔', - '棺材', '棒槌', '棉花', '核桃', '栅栏', '柴火', '架势', '枕头', '枇杷', '机灵', '本事', - '木头', '木匠', '朋友', '月饼', '月亮', '暖和', '明白', '时候', '新鲜', '故事', '收拾', - '收成', '提防', '挖苦', '挑剔', '指甲', '指头', '拾掇', '拳头', '拨弄', '招牌', '招呼', - '抬举', '护士', '折腾', '扫帚', '打量', '打算', '打点', '打扮', '打听', '打发', '扎实', - '扁担', '戒指', '懒得', '意识', '意思', '情形', '悟性', '怪物', '思量', '怎么', '念头', - '念叨', '快活', '忙活', '志气', '心思', '得罪', '张罗', '弟兄', '开通', '应酬', '庄稼', - '干事', '帮手', '帐篷', '希罕', '师父', '师傅', '巴结', '巴掌', '差事', '工夫', '岁数', - '屁股', '尾巴', '少爷', '小气', '小伙', '将就', '对头', '对付', '寡妇', '家伙', '客气', - '实在', '官司', '学问', '学生', '字号', '嫁妆', '媳妇', '媒人', '婆家', '娘家', '委屈', - '姑娘', '姐夫', '妯娌', '妥当', '妖精', '奴才', '女婿', '头发', '太阳', '大爷', '大方', - '大意', '大夫', '多少', '多么', '外甥', '壮实', '地道', '地方', '在乎', '困难', '嘴巴', - '嘱咐', '嘟囔', '嘀咕', '喜欢', '喇嘛', '喇叭', '商量', '唾沫', '哑巴', '哈欠', '哆嗦', - '咳嗽', '和尚', '告诉', '告示', '含糊', '吓唬', '后头', '名字', '名堂', '合同', '吆喝', - '叫唤', '口袋', '厚道', '厉害', '千斤', '包袱', '包涵', '匀称', '勤快', '动静', '动弹', - '功夫', '力气', '前头', '刺猬', '刺激', '别扭', '利落', '利索', '利害', '分析', '出息', - '凑合', '凉快', '冷战', '冤枉', '冒失', '养活', '关系', '先生', '兄弟', '便宜', '使唤', - '佩服', '作坊', '体面', '位置', '似的', '伙计', '休息', '什么', '人家', '亲戚', '亲家', - '交情', '云彩', '事情', '买卖', '主意', '丫头', '丧气', '两口', '东西', '东家', '世故', - '不由', '不在', '下水', '下巴', '上头', '上司', '丈夫', '丈人', '一辈', '那个', '菩萨', - '父亲', '母亲', '咕噜', '邋遢', '费用', '冤家', '甜头', '介绍', '荒唐', '大人', '泥鳅', - '幸福', '熟悉', '计划', '扑腾', '蜡烛', '姥爷', '照顾', '喉咙', '吉他', '弄堂', '蚂蚱', - '凤凰', '拖沓', '寒碜', '糟蹋', '倒腾', '报复', '逻辑', '盘缠', '喽啰', '牢骚', '咖喱', - '扫把', '惦记' - } - self.must_not_neural_tone_words = { - "男子", "女子", "分子", "原子", "量子", "莲子", "石子", "瓜子", "电子", "人人", "虎虎" - } - self.punc = ":,;。?!“”‘’':,;.?!" - - # the meaning of jieba pos tag: https://blog.csdn.net/weixin_44174352/article/details/113731041 - # e.g. - # word: "家里" - # pos: "s" - # finals: ['ia1', 'i3'] - def _neural_sandhi(self, word: str, pos: str, - finals: List[str]) -> List[str]: - - # reduplication words for n. and v. e.g. 
奶奶, 试试, 旺旺 - for j, item in enumerate(word): - if j - 1 >= 0 and item == word[j - 1] and pos[0] in { - "n", "v", "a" - } and word not in self.must_not_neural_tone_words: - finals[j] = finals[j][:-1] + "5" - ge_idx = word.find("个") - if len(word) >= 1 and word[-1] in "吧呢啊呐噻嘛吖嗨呐哦哒额滴哩哟喽啰耶喔诶": - finals[-1] = finals[-1][:-1] + "5" - elif len(word) >= 1 and word[-1] in "的地得": - finals[-1] = finals[-1][:-1] + "5" - # e.g. 走了, 看着, 去过 - # elif len(word) == 1 and word in "了着过" and pos in {"ul", "uz", "ug"}: - # finals[-1] = finals[-1][:-1] + "5" - elif len(word) > 1 and word[-1] in "们子" and pos in { - "r", "n" - } and word not in self.must_not_neural_tone_words: - finals[-1] = finals[-1][:-1] + "5" - # e.g. 桌上, 地下, 家里 - elif len(word) > 1 and word[-1] in "上下里" and pos in {"s", "l", "f"}: - finals[-1] = finals[-1][:-1] + "5" - # e.g. 上来, 下去 - elif len(word) > 1 and word[-1] in "来去" and word[-2] in "上下进出回过起开": - finals[-1] = finals[-1][:-1] + "5" - # 个做量词 - elif (ge_idx >= 1 and - (word[ge_idx - 1].isnumeric() or - word[ge_idx - 1] in "几有两半多各整每做是")) or word == '个': - finals[ge_idx] = finals[ge_idx][:-1] + "5" - else: - if word in self.must_neural_tone_words or word[ - -2:] in self.must_neural_tone_words: - finals[-1] = finals[-1][:-1] + "5" - - word_list = self._split_word(word) - finals_list = [finals[:len(word_list[0])], finals[len(word_list[0]):]] - for i, word in enumerate(word_list): - # conventional neural in Chinese - if word in self.must_neural_tone_words or word[ - -2:] in self.must_neural_tone_words: - finals_list[i][-1] = finals_list[i][-1][:-1] + "5" - finals = sum(finals_list, []) - return finals - - def _bu_sandhi(self, word: str, finals: List[str]) -> List[str]: - # e.g. 看不懂 - if len(word) == 3 and word[1] == "不": - finals[1] = finals[1][:-1] + "5" - else: - for i, char in enumerate(word): - # "不" before tone4 should be bu2, e.g. 不怕 - if char == "不" and i + 1 < len(word) and finals[i + - 1][-1] == "4": - finals[i] = finals[i][:-1] + "2" - return finals - - def _yi_sandhi(self, word: str, finals: List[str]) -> List[str]: - # "一" in number sequences, e.g. 一零零, 二一零 - if word.find("一") != -1 and all( - [item.isnumeric() for item in word if item != "一"]): - return finals - # "一" between reduplication words shold be yi5, e.g. 看一看 - elif len(word) == 3 and word[1] == "一" and word[0] == word[-1]: - finals[1] = finals[1][:-1] + "5" - # when "一" is ordinal word, it should be yi1 - elif word.startswith("第一"): - finals[1] = finals[1][:-1] + "1" - else: - for i, char in enumerate(word): - if char == "一" and i + 1 < len(word): - # "一" before tone4 should be yi2, e.g. 一段 - if finals[i + 1][-1] == "4": - finals[i] = finals[i][:-1] + "2" - # "一" before non-tone4 should be yi4, e.g. 
一天 - else: - # "一" 后面如果是标点,还读一声 - if word[i + 1] not in self.punc: - finals[i] = finals[i][:-1] + "4" - return finals - - def _split_word(self, word: str) -> List[str]: - word_list = jieba.cut_for_search(word) - word_list = sorted(word_list, key=lambda i: len(i), reverse=False) - first_subword = word_list[0] - first_begin_idx = word.find(first_subword) - if first_begin_idx == 0: - second_subword = word[len(first_subword):] - new_word_list = [first_subword, second_subword] - else: - second_subword = word[:-len(first_subword)] - new_word_list = [second_subword, first_subword] - return new_word_list - - def _three_sandhi(self, word: str, finals: List[str]) -> List[str]: - if len(word) == 2 and self._all_tone_three(finals): - finals[0] = finals[0][:-1] + "2" - elif len(word) == 3: - word_list = self._split_word(word) - if self._all_tone_three(finals): - # disyllabic + monosyllabic, e.g. 蒙古/包 - if len(word_list[0]) == 2: - finals[0] = finals[0][:-1] + "2" - finals[1] = finals[1][:-1] + "2" - # monosyllabic + disyllabic, e.g. 纸/老虎 - elif len(word_list[0]) == 1: - finals[1] = finals[1][:-1] + "2" - else: - finals_list = [ - finals[:len(word_list[0])], finals[len(word_list[0]):] - ] - if len(finals_list) == 2: - for i, sub in enumerate(finals_list): - # e.g. 所有/人 - if self._all_tone_three(sub) and len(sub) == 2: - finals_list[i][0] = finals_list[i][0][:-1] + "2" - # e.g. 好/喜欢 - elif i == 1 and not self._all_tone_three(sub) and finals_list[i][0][-1] == "3" and \ - finals_list[0][-1][-1] == "3": - - finals_list[0][-1] = finals_list[0][-1][:-1] + "2" - finals = sum(finals_list, []) - # split idiom into two words who's length is 2 - elif len(word) == 4: - finals_list = [finals[:2], finals[2:]] - finals = [] - for sub in finals_list: - if self._all_tone_three(sub): - sub[0] = sub[0][:-1] + "2" - finals += sub - - return finals - - def _all_tone_three(self, finals: List[str]) -> bool: - return all(x[-1] == "3" for x in finals) - - # merge "不" and the word behind it - # if don't merge, "不" sometimes appears alone according to jieba, which may occur sandhi error - def _merge_bu(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - last_word = "" - for word, pos in seg: - if last_word == "不": - word = last_word + word - if word != "不": - new_seg.append((word, pos)) - last_word = word[:] - if last_word == "不": - new_seg.append((last_word, 'd')) - last_word = "" - return new_seg - - # function 1: merge "一" and reduplication words in it's left and right, e.g. "听","一","听" ->"听一听" - # function 2: merge single "一" and the word behind it - # if don't merge, "一" sometimes appears alone according to jieba, which may occur sandhi error - # e.g. 
- # input seg: [('听', 'v'), ('一', 'm'), ('听', 'v')] - # output seg: [['听一听', 'v']] - def _merge_yi(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - # function 1 - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and word == "一" and i + 1 < len(seg) and seg[i - 1][ - 0] == seg[i + 1][0] and seg[i - 1][1] == "v": - new_seg[i - 1][0] = new_seg[i - 1][0] + "一" + new_seg[i - 1][0] - else: - if i - 2 >= 0 and seg[i - 1][0] == "一" and seg[i - 2][ - 0] == word and pos == "v": - continue - else: - new_seg.append([word, pos]) - seg = new_seg - new_seg = [] - # function 2 - for i, (word, pos) in enumerate(seg): - if new_seg and new_seg[-1][0] == "一": - new_seg[-1][0] = new_seg[-1][0] + word - else: - new_seg.append([word, pos]) - return new_seg - - # the first and the second words are all_tone_three - def _merge_continuous_three_tones( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - sub_finals_list = [ - lazy_pinyin( - word, neutral_tone_with_five=True, style=Style.FINALS_TONE3) - for (word, pos) in seg - ] - assert len(sub_finals_list) == len(seg) - merge_last = [False] * len(seg) - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and self._all_tone_three( - sub_finals_list[i - 1]) and self._all_tone_three( - sub_finals_list[i]) and not merge_last[i - 1]: - # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi - if not self._is_reduplication(seg[i - 1][0]) and len( - seg[i - 1][0]) + len(seg[i][0]) <= 3: - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - merge_last[i] = True - else: - new_seg.append([word, pos]) - else: - new_seg.append([word, pos]) - - return new_seg - - def _is_reduplication(self, word: str) -> bool: - return len(word) == 2 and word[0] == word[1] - - # the last char of first word and the first char of second word is tone_three - def _merge_continuous_three_tones_2( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - sub_finals_list = [ - lazy_pinyin( - word, neutral_tone_with_five=True, style=Style.FINALS_TONE3) - for (word, pos) in seg - ] - assert len(sub_finals_list) == len(seg) - merge_last = [False] * len(seg) - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and sub_finals_list[i - 1][-1][-1] == "3" and sub_finals_list[i][0][-1] == "3" and not \ - merge_last[i - 1]: - # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi - if not self._is_reduplication(seg[i - 1][0]) and len( - seg[i - 1][0]) + len(seg[i][0]) <= 3: - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - merge_last[i] = True - else: - new_seg.append([word, pos]) - else: - new_seg.append([word, pos]) - return new_seg - - def _merge_er(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and word == "儿" and seg[i-1][0] != "#": - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - else: - new_seg.append([word, pos]) - return new_seg - - def _merge_reduplication( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - for i, (word, pos) in enumerate(seg): - if new_seg and word == new_seg[-1][0]: - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - else: - new_seg.append([word, pos]) - return new_seg - - def pre_merge_for_modify( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - seg = self._merge_bu(seg) - try: - seg = self._merge_yi(seg) - except: - print("_merge_yi failed") - seg = self._merge_reduplication(seg) - seg = 
self._merge_continuous_three_tones(seg) - seg = self._merge_continuous_three_tones_2(seg) - seg = self._merge_er(seg) - return seg - - def modified_tone(self, word: str, pos: str, - finals: List[str]) -> List[str]: - finals = self._bu_sandhi(word, finals) - finals = self._yi_sandhi(word, finals) - finals = self._neural_sandhi(word, pos, finals) - finals = self._three_sandhi(word, finals) - return finals diff --git a/spaces/dineshreddy/WALT/mmdet/models/backbones/resnext.py b/spaces/dineshreddy/WALT/mmdet/models/backbones/resnext.py deleted file mode 100644 index 6dbcbd516fd308b1d703eecb83ab275f6b159516..0000000000000000000000000000000000000000 --- a/spaces/dineshreddy/WALT/mmdet/models/backbones/resnext.py +++ /dev/null @@ -1,153 +0,0 @@ -import math - -from mmcv.cnn import build_conv_layer, build_norm_layer - -from ..builder import BACKBONES -from ..utils import ResLayer -from .resnet import Bottleneck as _Bottleneck -from .resnet import ResNet - - -class Bottleneck(_Bottleneck): - expansion = 4 - - def __init__(self, - inplanes, - planes, - groups=1, - base_width=4, - base_channels=64, - **kwargs): - """Bottleneck block for ResNeXt. - - If style is "pytorch", the stride-two layer is the 3x3 conv layer, if - it is "caffe", the stride-two layer is the first 1x1 conv layer. - """ - super(Bottleneck, self).__init__(inplanes, planes, **kwargs) - - if groups == 1: - width = self.planes - else: - width = math.floor(self.planes * - (base_width / base_channels)) * groups - - self.norm1_name, norm1 = build_norm_layer( - self.norm_cfg, width, postfix=1) - self.norm2_name, norm2 = build_norm_layer( - self.norm_cfg, width, postfix=2) - self.norm3_name, norm3 = build_norm_layer( - self.norm_cfg, self.planes * self.expansion, postfix=3) - - self.conv1 = build_conv_layer( - self.conv_cfg, - self.inplanes, - width, - kernel_size=1, - stride=self.conv1_stride, - bias=False) - self.add_module(self.norm1_name, norm1) - fallback_on_stride = False - self.with_modulated_dcn = False - if self.with_dcn: - fallback_on_stride = self.dcn.pop('fallback_on_stride', False) - if not self.with_dcn or fallback_on_stride: - self.conv2 = build_conv_layer( - self.conv_cfg, - width, - width, - kernel_size=3, - stride=self.conv2_stride, - padding=self.dilation, - dilation=self.dilation, - groups=groups, - bias=False) - else: - assert self.conv_cfg is None, 'conv_cfg must be None for DCN' - self.conv2 = build_conv_layer( - self.dcn, - width, - width, - kernel_size=3, - stride=self.conv2_stride, - padding=self.dilation, - dilation=self.dilation, - groups=groups, - bias=False) - - self.add_module(self.norm2_name, norm2) - self.conv3 = build_conv_layer( - self.conv_cfg, - width, - self.planes * self.expansion, - kernel_size=1, - bias=False) - self.add_module(self.norm3_name, norm3) - - if self.with_plugins: - self._del_block_plugins(self.after_conv1_plugin_names + - self.after_conv2_plugin_names + - self.after_conv3_plugin_names) - self.after_conv1_plugin_names = self.make_block_plugins( - width, self.after_conv1_plugins) - self.after_conv2_plugin_names = self.make_block_plugins( - width, self.after_conv2_plugins) - self.after_conv3_plugin_names = self.make_block_plugins( - self.planes * self.expansion, self.after_conv3_plugins) - - def _del_block_plugins(self, plugin_names): - """delete plugins for block if exist. - - Args: - plugin_names (list[str]): List of plugins name to delete. 
- """ - assert isinstance(plugin_names, list) - for plugin_name in plugin_names: - del self._modules[plugin_name] - - -@BACKBONES.register_module() -class ResNeXt(ResNet): - """ResNeXt backbone. - - Args: - depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. - in_channels (int): Number of input image channels. Default: 3. - num_stages (int): Resnet stages. Default: 4. - groups (int): Group of resnext. - base_width (int): Base width of resnext. - strides (Sequence[int]): Strides of the first block of each stage. - dilations (Sequence[int]): Dilation of each stage. - out_indices (Sequence[int]): Output from which stages. - style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two - layer is the 3x3 conv layer, otherwise the stride-two layer is - the first 1x1 conv layer. - frozen_stages (int): Stages to be frozen (all param fixed). -1 means - not freezing any parameters. - norm_cfg (dict): dictionary to construct and config norm layer. - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. - zero_init_residual (bool): whether to use zero init for last norm layer - in resblocks to let them behave as identity. - """ - - arch_settings = { - 50: (Bottleneck, (3, 4, 6, 3)), - 101: (Bottleneck, (3, 4, 23, 3)), - 152: (Bottleneck, (3, 8, 36, 3)) - } - - def __init__(self, groups=1, base_width=4, **kwargs): - self.groups = groups - self.base_width = base_width - super(ResNeXt, self).__init__(**kwargs) - - def make_res_layer(self, **kwargs): - """Pack all blocks in a stage into a ``ResLayer``""" - return ResLayer( - groups=self.groups, - base_width=self.base_width, - base_channels=self.base_channels, - **kwargs) diff --git a/spaces/drift-ai/recruiter-assistant-jbfxrs/scripts/README.md b/spaces/drift-ai/recruiter-assistant-jbfxrs/scripts/README.md deleted file mode 100644 index 7f95dc7c1ab5849540d0608428e5cfbeb364dea5..0000000000000000000000000000000000000000 --- a/spaces/drift-ai/recruiter-assistant-jbfxrs/scripts/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# JobFixers - -## Code - -Generated from: https://chat.openai.com/share/3bc1e603-e14a-4d9a-b51b-1231e488a95f diff --git a/spaces/elyza/ELYZA-japanese-Llama-2-7b-fast-instruct-demo/style.css b/spaces/elyza/ELYZA-japanese-Llama-2-7b-fast-instruct-demo/style.css deleted file mode 100644 index b185037b671328a7ba248ef74b0f15b98058dca5..0000000000000000000000000000000000000000 --- a/spaces/elyza/ELYZA-japanese-Llama-2-7b-fast-instruct-demo/style.css +++ /dev/null @@ -1,23 +0,0 @@ -h1 { - text-align: center; -} - -#logo { - display: flex; - justify-content: center; - align-items: center; - gap: 50px; -} - -#duplicate-button { - margin: auto; - color: white; - background: #1565c0; - border-radius: 100vh; -} - -#component-0 { - max-width: 900px; - margin: auto; - padding-top: 1.5rem; -} diff --git a/spaces/eson/tokenizer-arena/vocab/llama/__init__.py b/spaces/eson/tokenizer-arena/vocab/llama/__init__.py deleted file mode 100644 index 33726a5706d9d92f84feef34d8ef170fe1a1e69d..0000000000000000000000000000000000000000 --- a/spaces/eson/tokenizer-arena/vocab/llama/__init__.py +++ /dev/null @@ -1,36 +0,0 @@ - -""" - -## 指令 special token - -{"token_id": 29961, "decode_str": "[", "token": "["} -{"token_id": 25580, "decode_str": "INST", "token": "INST"} -{"token_id": 29962, "decode_str": "]", "token": "]"} - -{"token_id": 
3532, "decode_str": "<<", "token": "▁<<"} -{"token_id": 14816, "decode_str": "SY", "token": "SY"} -{"token_id": 29903, "decode_str": "S", "token": "S"} -{"token_id": 6778, "decode_str": ">>", "token": ">>"} - -{"token_id": 13, "decode_str": "\n", "token": "<0x0A>"} - -疑问:为什么不将 <<SYS>> <</SYS>> [INST] [/INST] 做成1个id? -""" - -import os -from transformers import LlamaTokenizer -from vocab import TokenizerType, TokenizerImpl - -CURRENT_DIR = os.path.dirname(os.path.abspath(__file__)) -TOKENIZER_DIR = os.path.join(CURRENT_DIR, "tokenizer") - - - -tokenizer = LlamaTokenizer.from_pretrained(TOKENIZER_DIR) - - -tokenizer.parent = "" -tokenizer.type = TokenizerType.ByteBPE.name -tokenizer.implementation = TokenizerImpl.SentencePiece.name # https://github.com/facebookresearch/llama/blob/main/llama/tokenizer.py -tokenizer.comments = "split all numbers into individual digits, " \ - "and fallback to bytes to decompose unknown UTF-8 characters" diff --git a/spaces/evaluate-metric/meteor/app.py b/spaces/evaluate-metric/meteor/app.py deleted file mode 100644 index 3bcb6bf27d14ef87a84ffbdfda0721a13c811481..0000000000000000000000000000000000000000 --- a/spaces/evaluate-metric/meteor/app.py +++ /dev/null @@ -1,6 +0,0 @@ -import evaluate -from evaluate.utils import launch_gradio_widget - - -module = evaluate.load("meteor") -launch_gradio_widget(module) diff --git a/spaces/falterWliame/Face_Mask_Detection/Daub Ages 2 0 Keygen 26 __FULL__.md b/spaces/falterWliame/Face_Mask_Detection/Daub Ages 2 0 Keygen 26 __FULL__.md deleted file mode 100644 index 5002af3bb4fb399ccab9f0d1fde947d5bb2a4381..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Daub Ages 2 0 Keygen 26 __FULL__.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>daub ages 2 0 keygen 26</h2><br /><p><b><b>DOWNLOAD</b> ►►►►► <a href="https://urlca.com/2uDdyn">https://urlca.com/2uDdyn</a></b></p><br /><br /> -<br /> -... 0.8 http://granabun.htw.pl/pdf-password-cracker-pro-standart-enterprise-v3.2.0.1- ... http://granabun.htw.pl/software-collection-pack-2009-781.html Wed, 26 Jan ... ://granabun.htw.pl/audialsone-v4.1.2010.1000-incl-keygen-and-patch-136.html ... monthly 0.8 http://granabun.htw.pl/daub-ages-1.53-bilanguage-690.html Tue, ... 1fdad05405<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/falterWliame/Face_Mask_Detection/Onone Perfect Resize 7.5 Keygen Generator [UPDATED].md b/spaces/falterWliame/Face_Mask_Detection/Onone Perfect Resize 7.5 Keygen Generator [UPDATED].md deleted file mode 100644 index 3a8aaa8b670ed489465e24935efcc7040b2bd03e..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Onone Perfect Resize 7.5 Keygen Generator [UPDATED].md +++ /dev/null @@ -1,193 +0,0 @@ - -<h1>Onone Perfect Resize 7.5 Keygen Generator - How to Resize Your Photos with Ease</h1> - -<p>If you are looking for a way to resize your photos without losing quality, you might want to try Onone Perfect Resize 7.5. This is a powerful software that allows you to enlarge or reduce your images up to 300 times without any distortion or artifacts. You can also crop, rotate, watermark, edit, and merge multiple photos with this software.</p> -<h2>onone perfect resize 7.5 keygen generator</h2><br /><p><b><b>Download</b> › <a href="https://urlca.com/2uDbXJ">https://urlca.com/2uDbXJ</a></b></p><br /><br /> - -<p>However, Onone Perfect Resize 7.5 is not a free software. You need to purchase a license to use it. But don't worry, there is a way to get it for free. 
You can use a keygen generator to create a valid serial number for Onone Perfect Resize 7.5. A keygen generator is a program that generates random codes that can activate a software.</p> - -<p>In this article, we will show you how to download and install Onone Perfect Resize 7.5 keygen generator, how to use it to generate a serial number, and how to use Onone Perfect Resize 7.5 to resize your photos.</p> - -<h2>How to Download and Install Onone Perfect Resize 7.5 Keygen Generator</h2> - -<p>The first step is to download and install Onone Perfect Resize 7.5 keygen generator on your computer. You can find it on various websites that offer cracked software, such as LexCliq.com, SoundCloud.com, or Sway.office.com. However, you need to be careful about the source of the keygen generator, as some of them may contain viruses or malware that can harm your computer.</p> - -<p>To download and install Onone Perfect Resize 7.5 keygen generator safely, follow these steps:</p> -<p></p> - -<ol> -<li>Go to one of the websites that offer Onone Perfect Resize 7.5 keygen generator, such as LexCliq.com</li> -<li>Find the download link for Onone Perfect Resize 7.5 keygen generator and click on it</li> -<li>Save the ZIP file to your computer and extract it</li> -<li>Run the keygen generator executable file and follow the instructions on the screen</li> -<li>Wait for the installation to complete and close the keygen generator</li> -</ol> - -<h2>How to Use Onone Perfect Resize 7.5 Keygen Generator to Generate a Serial Number</h2> - -<p>The next step is to use Onone Perfect Resize 7.5 keygen generator to generate a serial number for Onone Perfect Resize 7.5. A serial number is a code that consists of letters and numbers that can activate a software.</p> - -<p>To use Onone Perfect Resize 7.5 keygen generator to generate a serial number, follow these steps:</p> - -<ol> -<li>Run the keygen generator again and click on the Generate button</li> -<li>A random serial number will appear on the screen</li> -<li>Copy the serial number and save it somewhere safe</li> -<li>Close the keygen generator</li> -</ol> - -<h2>How to Use Onone Perfect Resize 7.5 to Resize Your Photos</h2> - -<p>The final step is to use Onone Perfect Resize 7.5 to resize your photos. Onone Perfect Resize 7.5 is a software that allows you to enlarge or reduce your images up to 300 times without any distortion or artifacts. 
You can also crop, rotate, watermark, edit, and merge multiple photos with this software.</p> - -<p>To use Onone Perfect Resize 7.5 to resize your photos, follow these steps:</p> - -<ol> -<li>Download and install Onone Perfect Resize 7.5 from the official website or from another source</li> -<li>Launch Onone Perfect Resize 7.5 and enter the serial number that you generated with the keygen generator</li> -<li>Select the photo or photos that you want to resize and open them in Onone Perfect Resize 7.5</li> -<li>Adjust the size of your photo or photos using the sliders or by entering the desired dimensions in pixels or inches</li> -<li>You can also crop, rotate, watermark, edit, and merge your photos using the tools on the left panel</li> -<li>Preview your resized photo or photos on the right panel and make any changes if needed</li> -<li>Click on the Save button and choose a destination folder and a file format for your resized photo or photos</li> -<li>Enjoy your resized photo or photos with high quality and resolution</li> -</ol> - -<h2>Conclusion</h2> - -<p>Onone Perfect Resize 7.5 is a powerful software that allows you to resize your photos without losing quality. You can also crop, rotate, watermark, edit, and merge multiple photos with this software. However, Onone Perfect Resize 7.5 is not a free software. You need to purchase a license to use it.</p> - -<p>But don't worry, there is a way to get it for free. You can use a keygen generator to create a valid serial number for Onone Perfect Resize 7.5. A keygen generator is a program that generates random codes that can activate a software.</p> - -<p>In this article, we have shown you how to download and install Onone Perfect Resize 7.5 keygen generator, how to use it to generate a serial number, and how to use Onone Perfect Resize 7.5 to resize your photos.</p> - -<p>We hope this article has helped you understand what Onone Perfect Resize 7.5 keygen generator is, why you need it for Onone Perfect Resize 7.5, and how you can download and install it for free.</p> - -<p>If you have any questions or feedback, please leave a comment below.</p> -<h2>How to Resize Your Photos with Onone Perfect Resize 7.5</h2> - -<p>Onone Perfect Resize 7.5 is a software that allows you to resize your photos without losing quality. You can enlarge or reduce your images up to 300 times without any distortion or artifacts. 
You can also crop, rotate, watermark, edit, and merge multiple photos with this software.</p> - -<p>To resize your photos with Onone Perfect Resize 7.5, follow these steps:</p> - -<ol> -<li>Launch Onone Perfect Resize 7.5 and enter the serial number that you generated with the keygen generator</li> -<li>Select the photo or photos that you want to resize and open them in Onone Perfect Resize 7.5</li> -<li>Adjust the size of your photo or photos using the sliders or by entering the desired dimensions in pixels or inches</li> -<li>You can also crop, rotate, watermark, edit, and merge your photos using the tools on the left panel</li> -<li>Preview your resized photo or photos on the right panel and make any changes if needed</li> -<li>Click on the Save button and choose a destination folder and a file format for your resized photo or photos</li> -<li>Enjoy your resized photo or photos with high quality and resolution</li> -</ol> - -<h2>Benefits of Using Onone Perfect Resize 7.5</h2> - -<p>Onone Perfect Resize 7.5 is not only a software that allows you to resize your photos without losing quality, but also a software that offers many benefits for your photo editing needs. By using Onone Perfect Resize 7.5, you can enjoy some of the following advantages:</p> - -<ul> -<li>Improved image quality and performance in any size and resolution</li> -<li>Reduced file size and storage space by optimizing your images</li> -<li>Enhanced compatibility and flexibility with different image formats and devices</li> -<li>Increased creativity and productivity by using more than 20 professional tools</li> -<li>Simplified and streamlined workflow by integrating with other Onone software and Photoshop</li> -</ul> - -<p>Onone Perfect Resize 7.5 is a software that you don't want to miss if you are looking for a way to resize your photos without losing quality. It can make your photo editing experience more enjoyable and satisfying.</p> - -<h2>FAQs about Onone Perfect Resize 7.5 Keygen Generator</h2> - -<p>Here are some of the frequently asked questions and answers about Onone Perfect Resize 7.5 keygen generator:</p> - -<ul> -<li><b>What is a keygen generator?</b></li> -<p>A keygen generator is a program that generates random codes that can activate a software. A keygen generator can help you get a software for free without purchasing a license.</p> -<li><b>Is Onone Perfect Resize 7.5 keygen generator safe to use?</b></li> -<p>Onone Perfect Resize 7.5 keygen generator is safe to use, as long as you download it from a reliable website like LexCliq.com, SoundCloud.com, or Sway.office.com. However, you should always scan any file you download with an antivirus software before opening or installing it.</p> -<li><b>What are the system requirements for Onone Perfect Resize 7.5?</b></li> -<p>To use Onone Perfect Resize 7.5, you need to have a Windows operating system (XP, Vista, 7, 8, or 10), a processor of at least 1 GHz, a memory of at least 4 GB RAM, a hard disk space of at least 1 GB, and an internet connection.</p> -<li><b>Where can I get more help or support for Onone Perfect Resize 7.5?</b></li> -<p>If you need more help or support for Onone Perfect Resize 7.5, you can contact Onone customer service or visit their website or forums for more information.</p> -</ul> - -<h2>Conclusion</h2> - -<p>Onone Perfect Resize 7.5 is a powerful software that allows you to resize your photos without losing quality. You can also crop, rotate, watermark, edit, and merge multiple photos with this software. 
However, Onone Perfect Resize 7.5 is not a free software. You need to purchase a license to use it.</p> - -<p>But don't worry, there is a way to get it for free. You can use a keygen generator to create a valid serial number for Onone Perfect Resize 7.5. A keygen generator is a program that generates random codes that can activate a software.</p> - -<p>In this article, we have shown you how to download and install Onone Perfect Resize 7.5 keygen generator, how to use it to generate a serial number, and how to use Onone Perfect Resize 7.5 to resize your photos.</p> - -<p>We hope this article has helped you understand what Onone Perfect Resize 7.5 keygen generator is, why you need it for Onone Perfect Resize 7.5, and how you can download and install it for free.</p> - -<p>If you have any questions or feedback, please leave a comment below.</p> -<h2>How to Download and Install Onone Perfect Resize 7.5</h2> - -<p>Before you can use Onone Perfect Resize 7.5 to resize your photos, you need to download and install it on your computer. You can get it from the official website or from another source that offers cracked software. However, you need to be careful about the source of the software, as some of them may contain viruses or malware that can harm your computer.</p> - -<p>To download and install Onone Perfect Resize 7.5 safely, follow these steps:</p> - -<ol> -<li>Go to the official website of Onone or another website that offers Onone Perfect Resize 7.5, such as LexCliq.com</li> -<li>Find the download link for Onone Perfect Resize 7.5 and click on it</li> -<li>Save the ZIP file to your computer and extract it</li> -<li>Run the setup file and follow the instructions on the screen</li> -<li>Wait for the installation to complete and close the setup</li> -</ol> - -<h2>How to Integrate Onone Perfect Resize 7.5 with Photoshop</h2> - -<p>One of the best features of Onone Perfect Resize 7.5 is that it can integrate with Photoshop, which is a popular photo editing software. By integrating Onone Perfect Resize 7.5 with Photoshop, you can access and use Onone Perfect Resize 7.5 from within Photoshop, without having to switch between different programs.</p> - -<p>To integrate Onone Perfect Resize 7.5 with Photoshop, follow these steps:</p> - -<ol> -<li>Launch Photoshop and go to the Edit menu</li> -<li>Select Preferences and then Plug-Ins</li> -<li>Check the box that says Additional Plug-Ins Folder and click on Choose</li> -<li>Browse to the folder where you installed Onone Perfect Resize 7.5 and select it</li> -<li>Click on OK and restart Photoshop</li> -<li>To use Onone Perfect Resize 7.5 from within Photoshop, go to the File menu and select Automate</li> -<li>You will see Onone Perfect Resize 7.5 as one of the options</li> -<li>Select it and you will be able to use Onone Perfect Resize 7.5 as a plug-in for Photoshop</li> -</ol> - -<h2>Tips and Tricks for Using Onone Perfect Resize 7.5</h2> - -<p>Onone Perfect Resize 7.5 is a software that allows you to resize your photos without losing quality. You can also crop, rotate, watermark, edit, and merge multiple photos with this software. 
However, there are some tips and tricks that can help you use Onone Perfect Resize 7.5 more effectively and efficiently.</p> - -<p>Here are some of them:</p> - -<ul> -<li>Use the presets to quickly resize your photos according to common sizes and formats, such as web, print, or mobile</li> -<li>Use the gallery wrap feature to create a border around your photo that can be wrapped around a canvas or frame</li> -<li>Use the tiling feature to split your photo into smaller pieces that can be printed separately and assembled together</li> -<li>Use the sharpening feature to enhance the details and edges of your resized photo</li> -<li>Use the batch processing feature to resize multiple photos at once with the same settings</li> -</ul> - -<p>Onone Perfect Resize 7.5 is a software that can help you resize your photos without losing quality. You can also crop, rotate, watermark, edit, and merge multiple photos with this software.</p> - -<h2>Conclusion</h2> - -<p>Onone Perfect Resize 7.5 is a powerful software that allows you to resize your photos without losing quality. You can also crop, rotate, watermark, edit, and merge multiple photos with this software. However, Onone Perfect Resize 7.5 is not a free software. You need to purchase a license to use it.</p> - -<p>But don't worry, there is a way to get it for free. You can use a keygen generator to create a valid serial number for Onone Perfect Resize 7.5. A keygen generator is a program that generates random codes that can activate a software.</p> - -<p>In this article, we have shown you how to download and install Onone Perfect Resize 7.5 keygen generator, how to use it to generate a serial number, how to use Onone Perfect Resize 7.5 to resize your photos, how to integrate it with Photoshop, and some tips and tricks for using it.</p> - -<p>We hope this article has helped you understand what Onone Perfect Resize 7.5 keygen generator is, why you need it for Onone Perfect Resize 7.5, and how you can download and install it for free.</p> - -<p>If you have any questions or feedback, please leave a comment below.</p> -<h2>Conclusion</h2> - -<p>Onone Perfect Resize 7.5 is a powerful software that allows you to resize your photos without losing quality. You can also crop, rotate, watermark, edit, and merge multiple photos with this software. However, Onone Perfect Resize 7.5 is not a free software. You need to purchase a license to use it.</p> - -<p>But don't worry, there is a way to get it for free. You can use a keygen generator to create a valid serial number for Onone Perfect Resize 7.5. 
A keygen generator is a program that generates random codes that can activate a software.</p> - -<p>In this article, we have shown you how to download and install Onone Perfect Resize 7.5 keygen generator, how to use it to generate a serial number, how to use Onone Perfect Resize 7.5 to resize your photos, how to integrate it with Photoshop, and some tips and tricks for using it.</p> - -<p>We hope this article has helped you understand what Onone Perfect Resize 7.5 keygen generator is, why you need it for Onone Perfect Resize 7.5, and how you can download and install it for free.</p> - -<p>If you have any questions or feedback, please leave a comment below.</p> 3cee63e6c2<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/falterWliame/Face_Mask_Detection/PASSWORD Forza Motorsport 4 2011 PC Windows Full Game Cracked !EXCLUSIVE!.md b/spaces/falterWliame/Face_Mask_Detection/PASSWORD Forza Motorsport 4 2011 PC Windows Full Game Cracked !EXCLUSIVE!.md deleted file mode 100644 index 37f8bbee9bb55d32755973fa3bdd6728d7eeb8ff..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/PASSWORD Forza Motorsport 4 2011 PC Windows Full Game Cracked !EXCLUSIVE!.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>PASSWORD Forza Motorsport 4 2011 PC Windows Full Game Cracked</h2><br /><p><b><b>Download File</b> ✔ <a href="https://urlca.com/2uDdD2">https://urlca.com/2uDdD2</a></b></p><br /><br /> -<br /> -PASSWORD Forza Motorsport 4 2011 [PC Windows Full Game] Cracked ->>> http://bltlly.com/11845q. 4d29de3e1b<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/fatiXbelha/sd/CarX Street Apkrabi The Best Racing Game from the Makers of CarX Drift Racing 2.md b/spaces/fatiXbelha/sd/CarX Street Apkrabi The Best Racing Game from the Makers of CarX Drift Racing 2.md deleted file mode 100644 index 009f329b12abb3a71170024d42e6454fec34d2ee..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/CarX Street Apkrabi The Best Racing Game from the Makers of CarX Drift Racing 2.md +++ /dev/null @@ -1,100 +0,0 @@ - -<h1>CarX Street Apkrabi: A Review of the Open World Racing Game</h1> - <p>If you are looking for a realistic and immersive racing game that lets you explore a dynamic open world, you might want to check out CarX Street Apkrabi. This game is developed by CarX Technologies, the makers of CarX Drift Racing 2, and is currently in open beta testing. In this article, we will review the features, gameplay, graphics and performance of CarX Street Apkrabi and tell you how to download and install it on your Android device.</p> - <h2>Features of CarX Street Apkrabi</h2> - <p>CarX Street Apkrabi offers a variety of features that make it stand out from other racing games. Here are some of them:</p> -<h2>carx street apkrabi</h2><br /><p><b><b>Download</b> <a href="https://urllie.com/2uNvMF">https://urllie.com/2uNvMF</a></b></p><br /><br /> - <h3>Career Mode</h3> - <p>In career mode, you can choose to drive at top speed or drift through turns. You can also join clubs, defeat bosses and prove that you are the best driver in Sunset City. You can buy houses for your cars and assemble collections for every race mode. You can also fuel up with the right gas for the next race at city gas stations.</p> - <h3>Car Tuning</h3> - <p>You can build the car of your dreams using part tuning that unlocks all the physics of CarX Technology car behavior. You can swap parts and trick out your car for a specific race. You can also upgrade the engine, transmission, body, suspension and tires. 
You can even swap the engine of your unique car.</p> - <h3>Visual Car Tuning</h3> - <p>You can customize the mirrors, headlights, lights, skirt, bumper, rims and much more. You can create a unique look for your car.</p> - <h3>Realistic Physics and Controls</h3> - <p>The game boasts of impressive physics and controls that make you the master of your car. You can feel the difference between different types of cars and surfaces. You can also adjust the steering sensitivity, brake strength and camera angle to suit your preferences.</p> - <h3>High-Quality Graphics and Open World</h3> - <p>The game features modern, high-quality graphics and an enormous open world that you can explore at any time of day or night. The game has a dynamic day/night cycle and weather effects that add to the realism. You can admire the stunning scenery and landmarks of Sunset City as you race or drift.</p> - <h2>Gameplay of CarX Street Apkrabi</h2> - <p>The gameplay of CarX Street Apkrabi is simple and intuitive. You can use the on-screen buttons or tilt your device to steer your car. You can also use the handbrake button to drift or slide. You can switch between different camera views to get a better perspective of the action.</p> -<p>carx street apk download free<br /> -carx street racing game for android<br /> -carx street open world drift<br /> -carx street mod apk unlimited money<br /> -carx street beta test apk<br /> -carx street sunset city<br /> -carx street realistic physics<br /> -carx street car customization<br /> -carx street online multiplayer<br /> -carx street latest version apk<br /> -carx street best cars<br /> -carx street tips and tricks<br /> -carx street gameplay review<br /> -carx street cheats and hacks<br /> -carx street offline mode<br /> -carx street graphics settings<br /> -carx street controller support<br /> -carx street system requirements<br /> -carx street update news<br /> -carx street download size<br /> -carx street vs carx drift racing 2<br /> -carx street how to drift<br /> -carx street part tuning guide<br /> -carx street club races<br /> -carx street gas stations<br /> -carx street engine swap<br /> -carx street visual tuning<br /> -carx street dynamic day/night cycle<br /> -carx street career mode<br /> -carx street houses and collections<br /> -carx street bug report<br /> -carx street feedback and suggestions<br /> -carx street data privacy and security<br /> -carx street ratings and reviews<br /> -carx street google play store link [^1^]<br /> -carx street apkcombo download link [^2^]<br /> -carx street license agreement [^3^]<br /> -carx street privacy policy [^4^]<br /> -carx street official site [^5^]</p> - <p>The game offers different modes of racing such as highway races, city races and drift races. You can compete against other players online or against AI opponents offline. You can also challenge yourself with various missions and tasks that reward you with money and reputation points.</p> - <p>You can use the money to buy new cars or upgrade your existing ones. You can also use the reputation points to unlock new parts and locations. You can access your garage from anywhere in the game world and change your car settings or appearance.</p> - <h2>Graphics and Performance of CarX Street Apkrabi</h2> - <p>The graphics and performance of CarX Street Apkrabi are impressive for a mobile game. The game runs smoothly on most devices with minimal lag or glitches. 
The game also has realistic sound effects and music that enhance the atmosphere.</p> - <p>The game requires about 1 GB of storage space on your device and an internet connection to play online. The game also supports cloud saving so you can sync your progress across different devices.</p> - <h2>How to Download and Install CarX Street Apkrabi</h2> - <p>If you want to try out CarX Street Apkrabi on your Android device, you need to download and install its APK file from a reliable source such as APKCombo. Here are the steps to do so:</p> - <ol> -<li>Go to [APKCombo](^2^) on your browser and search for CarX Street Apkrabi.</li> -<li>Select the latest version of the APK file and tap on the download button.</li> -<li>Wait for the download to finish and then open the file manager on your device.</li> -<li>Locate the downloaded APK file and tap on it to install it.</li> -<li>Allow the installation of unknown sources if prompted by your device settings.</li> -<li>Wait for the installation to complete and then launch the game from your app drawer or home screen.</li> -</ol> - <p>Congratulations, you have successfully installed CarX Street Apkrabi on your Android device. Enjoy the game and have fun!</p> - <h2>Conclusion</h2> - <p>CarX Street Apkrabi is a realistic and immersive racing game that lets you explore a dynamic open world. You can customize your car, join clubs, compete with other players, and experience different modes of racing. The game has high-quality graphics, realistic physics, and smooth performance. You can download and install the game from APKCombo using the steps above. If you are a fan of racing games, you should definitely give CarX Street Apkrabi a try.</p> - <h2>FAQs</h2> - <h3>Is CarX Street Apkrabi free to play?</h3> - <p>Yes, CarX Street Apkrabi is free to play. However, it may contain in-app purchases that can enhance your gameplay or unlock additional features.</p> - <h3>Is CarX Street Apkrabi safe to download and install?</h3> - <p>Yes, CarX Street Apkrabi is safe to download and install from APKCombo. APKCombo is a trusted source of APK files that are verified and scanned for viruses and malware. However, you should always be careful when downloading and installing any APK file from unknown sources and check the permissions and reviews before installing.</p> - <h3>What are the minimum requirements to play CarX Street Apkrabi?</h3> - <p>The minimum requirements to play CarX Street Apkrabi are: - Android 5.0 or higher - 1 GB of RAM - 1 GB of storage space - Internet connection</p> - <h3>How can I contact the developers of CarX Street Apkrabi?</h3> - <p>You can contact the developers of CarX Street Apkrabi by visiting their official website or by sending an email to support@carx-tech.com. You can also follow them on Facebook, Instagram, YouTube and Twitter for the latest news and updates.</p> - <h3>How can I provide feedback or report bugs for CarX Street Apkrabi?</h3> - <p>You can provide feedback or report bugs for CarX Street Apkrabi by using the in-game feedback form or by sending an email to feedback@carx-tech.com. 
You can also join their Discord server or Reddit community to interact with other players and developers.</p> - <h2>Outline of the Article</h2> - <table> -<tr><th>H1</th><th>H2</th><th>H3</th><th>H4</th></tr> -<tr><td>CarX Street Apkrabi: A Review of the Open World Racing Game</td><td></td><td></td><td></td></tr> -<tr><td></td><td>Features of CarX Street Apkrabi</td><td></td><td></td></tr> -<tr><td></td><td></td><td>Career Mode</td><td></td></tr> -<tr><td></td><td></td><td>Car Tuning</td><td></td></tr> -<tr><td></td><td></td><td>Visual Car Tuning</td><td></td></tr> -<tr><td></td><td></td><td>Realistic Physics and Controls</td><td></td></tr> -<tr><td></td><td></td><td>High-Quality Graphics and Open World</td><td></td></tr> -<tr><td></td><td>Gameplay of CarX Street Apkrabi</td><td></td><td></td></tr> -<tr><td></td><td>Graphics and Performance of CarX Street Apkrabi</td><td></td><td></t</p> 401be4b1e0<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download Aether X 2 32 bits APK and Play on Your Android Device.md b/spaces/fatiXbelha/sd/Download Aether X 2 32 bits APK and Play on Your Android Device.md deleted file mode 100644 index c412c73e5bf980a7054a236d87a0fea42c37cf52..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download Aether X 2 32 bits APK and Play on Your Android Device.md +++ /dev/null @@ -1,177 +0,0 @@ -<br /> -<h1>AetherSX2: The Ultimate Android Game for PS2 Emulation</h1> -<p>If you are a fan of PlayStation 2 games and want to enjoy them on your Android device, you need a reliable and powerful PS2 emulator. And that's where AetherSX2 comes in. AetherSX2 is one of the best PS2 emulators for Android that lets you play your favorite PS2 games with high-quality graphics, smooth gameplay, and customizable controls. In this article, we will tell you everything you need to know about AetherSX2, including how to download and install it, why it is better than other PS2 emulators, and how to optimize your gaming experience with it.</p> -<h2>aether x 2 32 bits apk</h2><br /><p><b><b>Download Zip</b> >>> <a href="https://urllie.com/2uNBM5">https://urllie.com/2uNBM5</a></b></p><br /><br /> - <h2>What is AetherSX2?</h2> -<h3>A brief introduction to the app and its features</h3> -<p>AetherSX2 is an Android app that emulates the PlayStation 2 console on your smartphone or tablet. It is based on the PCSX2 project, which is a popular PS2 emulator for Windows, Linux, and Mac. AetherSX2 aims to provide a similar level of emulation quality and compatibility as PCSX2, but optimized for Android devices.</p> -<p>Some of the features of AetherSX2 are:</p> -<ul> -<li>It supports most PS2 games, including popular titles like God of War, Final Fantasy X, Kingdom Hearts, Grand Theft Auto, Metal Gear Solid, and more.</li> -<li>It allows you to adjust the graphics settings according to your device's capabilities and preferences. You can change the resolution, aspect ratio, frame rate, anti-aliasing, texture filtering, and more.</li> -<li>It offers various control options, such as using the touchscreen, an external gamepad, or a virtual controller. You can also customize the layout and size of the buttons.</li> -<li>It supports various audio formats, such as Dolby Digital, DTS, PCM, ADPCM, and more. 
You can also enable or disable sound effects and music.</li> -<li>It has a user-friendly interface that lets you easily browse and launch your games from your device's storage or an external SD card.</li> -</ul> - <h3>How to download and install AetherSX2 APK on your Android device</h3> -<p>To download and install AetherSX2 APK on your Android device, you need to follow these steps:</p> -<ol> -<li>Go to [AetherSX2 APK (Android Game) - Free Download - APKCombo](^1^) or [Download AetherSX2 APK - Latest Version 2023 - APKCombo](^1^) and download the latest version of the app.</li> -<li>Once the download is complete, tap on the APK file to start the installation process. You may need to enable unknown sources in your device's settings if you haven't done so before.</li> -<li>Follow the instructions on the screen to complete the installation.</li> -<li>Launch the app and grant it the necessary permissions to access your device's storage and microphone.</li> -<li>Enjoy playing your PS2 games on your Android device!</li> -</ol> - <h2 <h2>Why choose AetherSX2 over other PS2 emulators?</h2> -<h3>The advantages of AetherSX2 in terms of performance, compatibility, and customization</h3> -<p>AetherSX2 is not the only PS2 emulator for Android, but it is certainly one of the best. Compared to other PS2 emulators, such as DamonPS2, Play!, and PTWOE, AetherSX2 has several advantages that make it stand out.</p> -<p>First of all, AetherSX2 has a high performance and stability. It can run most PS2 games at full speed or close to it, without lagging, crashing, or freezing. It also has a low battery consumption and does not overheat your device. AetherSX2 uses a dynamic recompiler and a multi-threaded mode to optimize the emulation speed and efficiency.</p> -<p>aether x 2 android emulator apk<br /> -aether x 2 ps2 emulator apk<br /> -aether x 2 apk download for android<br /> -aether x 2 apk latest version<br /> -aether x 2 apk free download<br /> -aether x 2 apk modded<br /> -aether x 2 apk no ads<br /> -aether x 2 apk premium<br /> -aether x 2 apk pro<br /> -aether x 2 apk cracked<br /> -aether x 2 apk full version<br /> -aether x 2 apk unlocked<br /> -aether x 2 apk offline<br /> -aether x 2 apk online<br /> -aether x 2 apk update<br /> -aether x 2 apk old version<br /> -aether x 2 apk beta<br /> -aether x 2 apk stable<br /> -aether x 2 apk review<br /> -aether x 2 apk features<br /> -aether x 2 apk compatibility<br /> -aether x 2 apk requirements<br /> -aether x 2 apk installation guide<br /> -aether x 2 apk troubleshooting<br /> -aether x 2 apk support<br /> -aether x 2 apk feedback<br /> -aether x 2 apk ratings<br /> -aether x 2 apk alternatives<br /> -aether x 2 apk comparison<br /> -aether x 2 apk best settings<br /> -aether x 2 apk cheats codes<br /> -aether x 2 apk tips tricks<br /> -aether x 2 apk gameplay videos<br /> -aether x 2 apk screenshots images<br /> -aether x 2 apk wallpapers themes<br /> -aether x 2 apk bios files download<br /> -aether x 2 apk iso roms download<br /> -aether x 2 apk games list download<br /> -aether x 2 apk games compatibility list<br /> -aether x 2 apk games performance list<br /> -aether x 2 psx emulator for android apk <br /> -how to use aether x 2 on android phone <br /> -how to play ps1 games on android with aether x 2 <br /> -how to play psx games on android with aethersx <br /> -how to install and configure psx emulator on android</p> -<p>Secondly, AetherSX2 has a high compatibility and accuracy. 
It can run over 90% of the PS2 game library, including some games that are not supported by other emulators. It also has a high emulation accuracy, meaning that it can reproduce the original graphics, sound, and gameplay of the PS2 games without glitches or errors. AetherSX2 supports various file formats, such as ISO, BIN, IMG, NRG, and more.</p> -<p>Thirdly, AetherSX2 has a high customization and flexibility. It allows you to tweak the graphics settings to suit your device's capabilities and preferences. You can change the resolution, aspect ratio, frame rate, anti-aliasing, texture filtering, and more. You can also choose from different control options, such as using the touchscreen, an external gamepad, or a virtual controller. You can also customize the layout and size of the buttons.</p> - <h3>The best PS2 games to play on AetherSX2</h3> -<p>With AetherSX2, you can play hundreds of PS2 games on your Android device. But which ones are the best? Here are some of the most popular and recommended PS2 games to play on AetherSX2:</p> -<table> -<tr> -<th>Game</th> -<th>Genre</th> -<th>Description</th> -</tr> -<tr> -<td>God of War</td> -<td>Action-adventure</td> -<td>A thrilling hack-and-slash game that follows the epic journey of Kratos, a Spartan warrior who seeks revenge against the gods of Olympus.</td> -</tr> -<tr> -<td>Final Fantasy X</td> -<td>Role-playing</td> -<td>A classic JRPG that tells the story of Tidus, a young athlete who gets transported to a fantasy world called Spira and joins a group of adventurers to save it from a monstrous threat called Sin.</td> -</tr> -<tr> -<td>Kingdom Hearts</td> -<td>Action role-playing</td> -<td>A crossover game that combines characters and worlds from Disney and Final Fantasy franchises. It follows the adventures of Sora, a boy who wields a magical weapon called the Keyblade and teams up with Donald Duck and Goofy to stop the evil forces of darkness.</td> -</tr> -<tr <tr> -<td>Grand Theft Auto: San Andreas</td> -<td>Action-adventure</td> -<td>A sandbox game that lets you explore a vast open world of crime, violence, and fun. You play as CJ, a former gangster who returns to his hometown of San Andreas and gets involved in various missions and activities.</td> -</tr> -<tr> -<td>Metal Gear Solid 3: Snake Eater</td> -<td>Stealth-action</td> -<td>A prequel to the Metal Gear Solid series that takes place in the Cold War era. You play as Naked Snake, a special agent who infiltrates a Soviet jungle base and faces off against a rogue unit of elite soldiers.</td> -</tr> -<tr> -<td>Shadow of the Colossus</td> -<td>Action-adventure</td> -<td>A unique and artistic game that challenges you to defeat 16 gigantic creatures called colossi in order to revive a mysterious girl. You have to use your wits, skills, and horse to find and exploit their weaknesses.</td> -</tr> -</table> - <h2>How to optimize your gaming experience with AetherSX2?</h2> -<h3>Tips and tricks for setting up the app and configuring the settings</h3> -<p>To get the most out of AetherSX2, you need to set up the app and configure the settings properly. Here are some tips and tricks to help you do that:</p> -<ul> -<li>Before you launch the app, make sure you have enough storage space on your device or SD card. You will need at least 4 GB of free space to store your PS2 games and save data.</li> -<li>When you launch the app, you will see a list of PS2 games that are available on your device or SD card. 
You can also scan for new games by tapping on the refresh icon on the top right corner.</li> -<li>To start playing a game, just tap on it and wait for it to load. You can also long-press on a game to access more options, such as deleting, renaming, or moving it.</li> -<li>To access the settings menu, tap on the gear icon on the top left corner. You can change various settings related to graphics, audio, controls, system, and more.</li> -<li>To adjust the graphics settings, go to the graphics tab and select the renderer you want to use. You can choose between OpenGL and Vulkan, depending on your device's compatibility and performance. You can also change the resolution, aspect ratio, frame rate, anti-aliasing, texture filtering, and more.</li> -<li>To adjust the audio settings, go to the audio tab and select the output format you want to use. You can choose between Dolby Digital, DTS, PCM, ADPCM, and more. You can also enable or disable sound effects and music.</li> -<li>To adjust the control settings, go to the controls tab and select the input method you want to use. You can choose between touchscreen, external gamepad, or virtual controller. You can also customize the layout and size of the buttons.</li> -<li>To adjust the system settings, go to the system tab and select the BIOS file you want to use. You can also change the language, region, time zone, and date format of the emulator.</li> -<li>To save your settings, tap on the save icon on the top right corner. You can also reset your settings to default by tapping on the reset icon on the top left corner.</li> -</ul> - <h3>How to use cheats, save states, and screenshots on AetherSX2</h3> -<p>AetherSX2 also has some features that enhance your gaming experience, such as cheats, save states, and screenshots. Here is how to use them:</p> -<ul <ul> -<li>To use cheats, go to the cheats tab and tap on the plus icon on the top right corner. You can then enter the cheat code or name and enable or disable it. You can also import cheat codes from a text file or scan for them using a memory editor.</li> -<li>To use save states, go to the save states tab and tap on the slot you want to use. You can then save or load your game progress at any point. You can also delete or rename your save states.</li> -<li>To use screenshots, go to the screenshots tab and tap on the camera icon on the top right corner. You can then capture a screenshot of your game and save it to your device's gallery. You can also share or delete your screenshots.</li> -</ul> - <h2>Conclusion</h2> -<p>AetherSX2 is an amazing PS2 emulator for Android that lets you play your favorite PS2 games with high-quality graphics, smooth gameplay, and customizable controls. It is easy to download and install, and it has a high performance, compatibility, and customization. It also has some features that enhance your gaming experience, such as cheats, save states, and screenshots. If you are looking for a PS2 emulator for Android, you should definitely give AetherSX2 a try. 
You will not regret it!</p> - <h2>FAQs</h2> -<h3>What are the minimum requirements for running AetherSX2 on Android?</h3> -<p>The minimum requirements for running AetherSX2 on Android are:</p> -<ul> -<li>An Android device with Android 5.0 or higher</li> -<li>A quad-core processor with 1.5 GHz or higher</li> -<li>2 GB of RAM or more</li> -<li>4 GB of free storage space or more</li> -<li>An OpenGL ES 3.0 or Vulkan compatible GPU</li> -</ul> - <h3>Is AetherSX2 safe and legal to use?</h3> -<p>AetherSX2 is safe and legal to use, as long as you follow some rules:</p> -<ul> -<li>You should only download and install AetherSX2 from trusted sources, such as [AetherSX2 APK (Android Game) - Free Download - APKCombo] or [Download AetherSX2 APK - Latest Version 2023 - APKCombo].</li> -<li>You should only play PS2 games that you own legally and have ripped from your own discs. You should not download or share pirated games.</li> -<li>You should not use cheats or mods that give you an unfair advantage or harm other players in online games.</li> -</ul> - <h3>How can I update AetherSX2 to the latest version?</h3> -<p>To update AetherSX2 to the latest version, you need to follow these steps:</p> -<ol <ol> -<li>Go to [AetherSX2 APK (Android Game) - Free Download - APKCombo] or [Download AetherSX2 APK - Latest Version 2023 - APKCombo] and check if there is a new version of the app available.</li> -<li>If there is a new version, download the APK file and install it over the existing app. You do not need to uninstall the previous version.</li> -<li>If there is no new version, you can check the app's official website or social media pages for any news or updates.</li> -</ol> - <h3>How can I contact the developers of AetherSX2 for feedback or support?</h3> -<p>To contact the developers of AetherSX2 for feedback or support, you can use one of the following methods:</p> -<ul> -<li>You can send an email to aethersx2@gmail.com and describe your issue or suggestion.</li> -<li>You can join the AetherSX2 Discord server and chat with other users and developers. You can find the invite link on the app's website or social media pages.</li> -<li>You can follow the AetherSX2 Twitter account and send a direct message or tweet to them. You can also stay updated on the latest news and updates from them.</li> -</ul> - <h3>Where can I find more information about AetherSX2?</h3> -<p>To find more information about AetherSX2, you can visit one of the following sources:</p> -<ul> -<li>The app's official website: [AetherSX2 - The Ultimate PS2 Emulator for Android]</li> -<li>The app's official YouTube channel: [AetherSX2 - YouTube]</li> -<li>The app's official Reddit community: [r/AetherSX2]</li> -</ul></p> 197e85843d<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download Red Cross Certificate Online Fast Easy and Convenient.md b/spaces/fatiXbelha/sd/Download Red Cross Certificate Online Fast Easy and Convenient.md deleted file mode 100644 index 33795b00de9643fbbfcb0ac48e410997be7c9627..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download Red Cross Certificate Online Fast Easy and Convenient.md +++ /dev/null @@ -1,97 +0,0 @@ - -<h1>How to Download a Red Cross Certificate</h1> -<p>If you have taken a Red Cross training course, such as CPR, First Aid, or Lifeguarding, you may be wondering how to download your certificate of completion. A Red Cross certificate is a digital document that verifies your skills and knowledge in a specific area. 
In this article, we will explain what a Red Cross certificate is, why it is important, and how to access, print, or order it.</p> -<h2>download red cross certificate</h2><br /><p><b><b>Download Zip</b> ✫ <a href="https://urllie.com/2uNIuH">https://urllie.com/2uNIuH</a></b></p><br /><br /> - <h2>What is a Red Cross Certificate?</h2> -<p>A Red Cross certificate is a digital document that shows that you have successfully completed a Red Cross training course. It contains your name, the course name, the date of completion, and a unique ID and QR code. It also has the Red Cross logo and signature of the instructor or authorized representative.</p> - <h3>Types of Red Cross Certificates</h3> -<p>There are different types of Red Cross certificates depending on the course you have taken. Some of the most common ones are:</p> -<ul> -<li>CPR/AED Certification: This certificate shows that you have learned how to perform cardiopulmonary resuscitation (CPR) and use an automated external defibrillator (AED) in case of cardiac arrest.</li> -<li>First Aid Certification: This certificate shows that you have learned how to provide basic first aid for common injuries and illnesses.</li> -<li>BLS/CPR for Healthcare Providers Certification: This certificate shows that you have learned how to perform CPR and use an AED in a healthcare setting.</li> -<li>Lifeguarding Certification: This certificate shows that you have learned how to prevent, recognize, and respond to aquatic emergencies.</li> -<li>Babysitting & Child Care Certification: This certificate shows that you have learned how to care for children and infants in a babysitting or child care setting.</li> -</ul> - <h3>Benefits of Red Cross Certificates</h3> -<p>Having a Red Cross certificate can have many benefits for you and others. Some of them are:</p> -<ul> -<li>It proves your skills and knowledge in a specific area.</li> -<li>It can help you meet job requirements or enhance your resume.</li> -<li>It can help you save lives or prevent injuries in case of an emergency.</li> -<li>It can give you confidence and peace of mind.</li> -</ul> - <h2>How to Access Your Digital Certificate</h2> -<p>There are three ways to access your digital certificate after completing a Red Cross training course. You can use any of them depending on your preference and convenience.</p> - <h3>Via Email Link</h3> -<p>If you have provided your email address when registering for the course, you will receive an email from the Red Cross after your class with a link to your digital certificate. You can simply click on the link and view, print, or share your certificate as needed.</p> - <h3>Via Red Cross Website</h3> -<p>If you did not receive an email or deleted it by mistake, you can still access your digital certificate through the Red Cross website. You will need to visit <a href="(^1^)">redcross.org/take-a-class/digital-certificate</a> and enter your email address or first name, last name, year and month when the class was held, or certificate ID. You will then be directed to your digital certificate where you can view, print, or share it as needed.</p> - <h3>Via QR Code</h3> -<p>If you have a smart device with a camera and a QR reader app, you can also access your digital certificate by scanning the QR code on it. The QR code is located at the bottom right corner of your certificate and contains the same information as the ID. 
You can scan the QR code with your device and view, print, or share your certificate as needed.</p> - <h2>How to Print or Order a Wallet Card</h2> -<p>If you prefer to have a physical copy of your certificate, you can either print it yourself or order a wallet card from the Red Cross.</p> - <h3>How to Print Your Certificate</h3> -<p>If you want to print your certificate yourself, you can do so by following these steps:</p> -<ol> -<li>Access your digital certificate via email link, website, or QR code as explained above.</li> -<li>Click on the "Print" button at the top right corner of your certificate.</li> -<li>Select your printer settings and preferences and click "OK".</li> -<li>Cut out your certificate along the dotted lines and fold it in half.</li> -</ol> -<p>You can also save your certificate as a PDF file and print it later if you wish.</p> - <h3>How to Order a Wallet Card</h3> -<p>If you want to order a wallet card from the Red Cross, you can do so by following these steps:</p> -<ol> -<li>Access your digital certificate via email link, website, or QR code as explained above.</li> -<li>Click on the "Order a Wallet Card" button at the top right corner of your certificate.</li> -<li>Fill out the order form with your personal and payment information and click "Submit".</li> -<li>You will receive a confirmation email with your order details and tracking number.</li> -</ol> -<p>The wallet card will be mailed to you within 7-10 business days.
The wallet card is made of durable plastic and has the same information as your digital certificate. It also has a barcode that can be scanned to access your digital certificate online.</p> - <h2>Conclusion</h2> -<p>A Red Cross certificate is a valuable document that shows that you have completed a Red Cross training course and have the skills and knowledge to handle various situations. You can access, print, or order your digital certificate in different ways depending on your preference and convenience. Having a Red Cross certificate can help you meet job requirements, enhance your resume, save lives, prevent injuries, and give you confidence and peace of mind.</p> - <h2>FAQs</h2> -<p>Here are some frequently asked questions about Red Cross certificates:</p> - <h4>Q: How long is my Red Cross certificate valid for?</h4> -<p>A: Most Red Cross certificates are valid for two years from the date of completion. However, some courses may have different validity periods depending on the industry standards or regulations. You can check the expiration date of your certificate on the bottom left corner of it.</p> - <h4>Q: How can I renew my Red Cross certificate?</h4> -<p>A: You can renew your Red Cross certificate by taking a review or recertification course before your current certificate expires. You can find the available courses and schedules on the Red Cross website or contact your local Red Cross chapter for more information.</p> - <h4>Q: What if I lose my Red Cross certificate?</h4> -<p>A: If you lose your Red Cross certificate, you can access it online anytime via email link, website, or QR code as explained above. You can also print it yourself or order a wallet card if you need a physical copy. If you need further assistance, you can contact the Red Cross customer service at 1-800-RED-CROSS or support@redcrosstraining.org.</p> - <h4>Q: Can I share my Red Cross certificate with others?</h4> -<p>A: Yes, you can share your Red Cross certificate with others if you want to. You can do so by clicking on the "Share" button at the top right corner of your certificate and choosing the option that suits you best. You can share your certificate via email, social media, or web link. You can also download it as an image file and attach it to other documents.</p> - <h4>Q: How can I verify someone else's Red Cross certificate?</h4> -<p>A: If you want to verify someone else's Red Cross certificate, you can do so by scanning the QR code or entering the ID on their certificate on the Red Cross website at <a href="">redcross.org/confirm</a>. You will be able to see their name, course name, date of completion, and instructor name. 
You can also contact the Red Cross customer service at 1-800-RED-CROSS or support@redcrosstraining.org for further verification.</p> 401be4b1e0<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/fclong/summary/fengshen/utils/universal_checkpoint.py b/spaces/fclong/summary/fengshen/utils/universal_checkpoint.py deleted file mode 100644 index ff19ccbb6346507f4200c06de38efd4618eb1840..0000000000000000000000000000000000000000 --- a/spaces/fclong/summary/fengshen/utils/universal_checkpoint.py +++ /dev/null @@ -1,41 +0,0 @@ -from pytorch_lightning.callbacks import ModelCheckpoint -import os - - -class UniversalCheckpoint(ModelCheckpoint): - @staticmethod - def add_argparse_args(parent_args): - parser = parent_args.add_argument_group('universal checkpoint callback') - - parser.add_argument('--monitor', default='step', type=str) - parser.add_argument('--mode', default='max', type=str) - parser.add_argument('--save_ckpt_path', default='./ckpt/', type=str) - parser.add_argument('--load_ckpt_path', default='./ckpt/', type=str) - parser.add_argument( - '--filename', default='model-ep{epoch:02d}-st{step:d}', type=str) - parser.add_argument('--save_last', action='store_true', default=False) - parser.add_argument('--save_top_k', default=10, type=float) - parser.add_argument('--every_n_train_steps', default=None, type=float) - parser.add_argument('--save_weights_only', action='store_true', default=False) - parser.add_argument('--every_n_epochs', default=None, type=int) - parser.add_argument('--save_on_train_epoch_end', action='store_true', default=None) - - return parent_args - - def __init__(self, args): - super().__init__(monitor=args.monitor, - save_top_k=args.save_top_k, - mode=args.mode, - every_n_train_steps=args.every_n_train_steps, - save_weights_only=args.save_weights_only, - dirpath=args.save_ckpt_path, - filename=args.filename, - save_last=args.save_last, - every_n_epochs=args.every_n_epochs, - save_on_train_epoch_end=args.save_on_train_epoch_end) - - # 做兼容,如果目录不存在的话把这个参数去掉,不然会报错 - if args.load_ckpt_path is not None and \ - not os.path.exists(args.load_ckpt_path): - print('--------warning no checkpoint found--------, remove args') - args.load_ckpt_path = None diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Minecraft and Explore the World of Crafting and Building.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Minecraft and Explore the World of Crafting and Building.md deleted file mode 100644 index a5987a12dd51a6483c434f323c99f3570473b78a..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Minecraft and Explore the World of Crafting and Building.md +++ /dev/null @@ -1,158 +0,0 @@ - -<h1>How to Download Minecraft Crafting and Building</h1> -<p>If you are a fan of Minecraft, you might have heard of a new free building game called <strong>Minecraft Crafting and Building</strong>. This game is inspired by the popular sandbox game, but it has some unique features and gameplay that make it stand out. In this article, we will show you how to download Minecraft Crafting and Building for free, how to start playing the game, how to craft and build in the game, and how to improve your Minecraft builds with some tips and tricks. Let's get started!</p> - <h2>What is Minecraft Crafting and Building?</h2> -<h3>A brief introduction to the game and its features</h3> -<p>Minecraft Crafting and Building is a new free building game developed by PROTON MOBILE. 
It was released on April 17, 2023, and it has over 50 million downloads on Google Play Store. The game is designed for the whole family, from kids to adults, who love building games.</p> -<h2>download minecraft crafting and building</h2><br /><p><b><b>DOWNLOAD</b> ✺✺✺ <a href="https://gohhs.com/2uPpV5">https://gohhs.com/2uPpV5</a></b></p><br /><br /> -<p>The game allows you to create your own world with blocks of different types, shapes, and colors. You can build anything you can imagine, from houses, castles, temples, farms, villages, cities, to monuments, statues, pixel art, and more. You can also decorate your world with furniture, plants, animals, paintings, and other items.</p> -<p>The game also has a multiplayer mode, where you can play online with your friends or other players from around the world. You can visit their worlds, chat with them, help them with their builds, or compete with them in building contests. You can also choose your character's appearance, gender, skin color, hair style, clothes, and accessories.</p> -<h3>How it differs from the original Minecraft</h3> -<p>Minecraft Crafting and Building is not an official Minecraft game. It is a fan-made game that is inspired by Minecraft but has some differences. Some of the main differences are:</p> -<ul> -<li>The game is completely free to download and play. You don't need to pay anything or have a premium account to access all the features of the game.</li> -<li>The game has no survival mode or adventure mode. It is purely a creative mode game where you can build anything you want without any limitations or dangers.</li> -<li>The game has no monsters or enemies. You don't have to worry about fighting zombies, skeletons, creepers, spiders, or other hostile mobs. You can also play peacefully with friendly animals like dogs, cats, horses, cows, sheep, pigs, chickens, etc.</li> -<li>The game has more block types than Minecraft. It has over 100 different blocks that you can use for your builds. Some of them are grass block, stone block, wood block, brick block, glass block, metal block, marble block, sand block, dirt block, snow block, ice block, lava block, water block, the backpack button on the top right corner of the screen. You can drag and drop blocks and items from your inventory to your hotbar or vice versa. You can also tap on the trash can button to delete blocks and items from your inventory.</li> -<li>To access the recipe book in the game world, you can tap on the book button on the top left corner of the screen. You can scroll through the list of blocks and items that you can craft in the game. You can also tap on the magnifying glass button to search for a specific block or item by name. To craft a block or item, you need to have the required materials in your inventory. You can tap on the craft button to create the block or item and add it to your inventory.</li> -<li>To access the structure-like feature in the game world, you can tap on the hammer button on the bottom right corner of the screen. You can choose from a variety of structures that you can create in the game, such as houses, castles, bridges, towers, etc. You can also customize the size, color, and orientation of the structure. To create a structure, you need to have enough blocks in your inventory. You can tap on the build button to place the structure in front of you.</li> -<li>To access the camera mode in the game world, you can tap on the camera button on the bottom left corner of the screen. 
You can take screenshots of your builds and share them with others. You can also adjust the zoom, angle, and filter of the camera. To take a screenshot, you can tap on the shutter button. To share a screenshot, you can tap on the share button and choose your preferred app or platform.</li> -<li>To access the chat system in the game world, you can tap on the chat button on the top right corner of the screen. You can chat with other players online by typing your message and sending it. You can also use emojis and stickers to express yourself. To chat with a specific player, you can tap on their name and start a private conversation.</li> -</ul> -<p>These are the basic gameplay and controls of Minecraft Crafting and Building. You can also explore other features and options of the game by tapping on the menu button on the top left corner of the screen.</p> - <h2>How to Craft and Build in the Game</h2> -<h3>The crafting system and the recipe book</h3> -<p>Minecraft Crafting and Building has a simple and intuitive crafting system that allows you to create various blocks and items in the game. The crafting system is based on a recipe book that shows you how to craft any block or item in the game.</p> -<p>The recipe book is divided into several categories, such as basic blocks, decorative blocks, functional blocks, furniture, plants, animals, paintings, etc. Each category contains a list of blocks and items that belong to that category. Each block or item has a name, an icon, a description, and a list of materials that are required to craft it.</p> -<p>To craft a block or item, you need to have the required materials in your inventory. You can find the materials by breaking blocks or picking up items in the game world. You can also craft some materials from other materials using the recipe book. 
For example, you can craft wood planks from wood logs, or bricks from clay.</p> -<p>To access the recipe book, you can tap on the book button on the top left corner of the screen. You can scroll through the categories and the blocks and items in each category. You can also search for a specific block or item by name using the magnifying glass button. To craft a block or item, you can tap on it and see its details. You can then tap on the craft button to create it and add it to your inventory.</p> -<p>Here is an example of how to craft a wooden chair in the game:</p> -<ol> -<li>Open the recipe book and search for "wooden chair".</li> -<li>Tap on the wooden chair icon and see its details.
You will need 4 wood planks and 2 sticks to craft it.</li> -<li>Check your inventory and see if you have enough wood planks and sticks. If not, you can craft them from wood logs using the recipe book.</li> -<li>Tap on the craft button to create the wooden chair and add it to your inventory.</li> -<li>Close the recipe book and open your inventory. Drag and drop the wooden chair from your inventory to your hotbar.</li> -<li>Place the wooden chair in your world by tapping on the plus button.</li> -</ol> -<p>Congratulations! You have crafted a wooden chair in Minecraft Crafting and Building. You can use it to decorate your house or sit on it for fun.</p> - <h3>The building system and the structure-like feature</h3> -<p>Minecraft Crafting and Building has a flexible and powerful building system that allows you to build anything you can imagine in the game. The building system is based on a structure-like feature that allows you to create complex structures with one click.</p> -<p>The structure-like feature is a tool that lets you choose from a variety of structures that you can create in the game, such as houses, castles, bridges, towers, etc. You can also customize the size, color, and orientation of the structure. To create a structure, you need to have enough blocks in your inventory. You can then place the structure in your world by tapping on the build button.</p> -<p>To access the structure-like feature, you can tap on the hammer button on the bottom right corner of the screen. You can scroll through the list of structures that you can create in the game. You can also tap on the settings button to customize the size, color, and orientation of the structure. To create a structure, you need to have enough blocks in your inventory. You can tap on the build button to place the structure in front of you.</p> -<p>Here is an example of how to create a castle in the game:</p> -<ol> -<li>Open the structure-like feature and search for "castle".</li> -<li>Tap on the castle icon and see its details. You will need 200 stone blocks, 100 brick blocks, and 50 glass blocks to create it.</li> -<li>Check your inventory and see if you have enough blocks. If not, you can craft them from other materials using the recipe book.</li> -<li>Tap on the settings button to customize the size, color, and orientation of the castle. You can make it bigger or smaller, change its color, and rotate it as you like.</li> -<li>Tap on the build button to place the castle in your world.</li> -</ol> -<p>Congratulations! You have created a castle in Minecraft Crafting and Building. You can explore it, decorate it, or defend it from invaders.</p> - <h2>How to Improve Your Minecraft Builds</h2> -<h3>Some tips and tricks to make your builds more creative and realistic</h3> -<p>Minecraft Crafting and Building is a game that lets you unleash your creativity and imagination. However, sometimes you might feel stuck or bored with your builds. If you want to improve your Minecraft builds and make them more creative and realistic, here are some tips and tricks that you can try:</p> -<ul> -<li>Use different block types and colors to create contrast and texture in your builds. For example, you can use wood blocks for the walls, stone blocks for the roof, and glass blocks for the windows of your house.</li> -<li>Use furniture and items to add details and functionality to your builds. For example, you can use beds, chests, tables, chairs, lamps, paintings, etc. 
to furnish your house.</li> -<li>Use plants and animals to add life and nature to your builds. For example, you can use flowers, trees, grass, crops, etc. to create a garden or a farm around your house. You can also use dogs, cats, horses, cows, sheep, pigs, chickens, etc. to create a pet or a farm animal.</li> -<li>Use paintings and signs to add personality and style to your builds. For example, you can use paintings to decorate your walls with art or pictures. You can also use signs to write messages or names on your builds.</li> -<li>Use fireworks and balloons to add fun and celebration to your builds. For example, you can use fireworks to create a colorful display in the sky above your builds. You can also use balloons to create a festive atmosphere around your builds.</li> -</ul> -<p>These are some of the tips and tricks that you can use to improve your Minecraft builds. You can also experiment with different combinations and variations of blocks, items, plants, and animals to create your own unique and original builds.</p> - <h3>Some examples of amazing builds by other players</h3> -<p>If you need some inspiration or motivation for your Minecraft builds, you can check out some of the amazing builds by other players in the game. You can visit their worlds, admire their creations, learn from their techniques, or even collaborate with them. Here are some of the examples of amazing builds by other players in Minecraft Crafting and Building:</p> -<table> -<tr> -<th>Build</th> -<th>Description</th> -</tr> -<tr> -<td>A modern mansion</td> -<td>A large and luxurious house with a pool, a garage, a garden, and a helipad.</td> -</tr> -<tr> -<td>A medieval castle</td> -<td>A fortified and majestic structure with towers, walls, gates, and a moat.</td> -</tr> -<tr> -<td>A spaceship</td> -<td>A futuristic and sleek vehicle with engines, wings, and a cockpit.</td> -</tr> -<tr> -<td>A pyramid</td> -<td>An ancient and mysterious monument with stairs, chambers, and traps.</td> -</tr> -<tr> -<td>A pixel art</td> -<td>A colorful and artistic representation of a character or an object using blocks.</td> -</tr> -</table> -<p>These are some of the examples of amazing builds by other players in Minecraft Crafting and Building. You can find more examples by exploring the game or searching online. You can also share your own builds with others by using the camera mode and the chat system in the game.</p> - <h2>Conclusion</h2> -<h3>A summary of the main points of the article</h3> -<p>Minecraft Crafting and Building is a new free building game that is inspired by Minecraft but has some unique features and gameplay. In this article, we have shown you how to download Minecraft Crafting and Building for free, how to start playing the game, how to craft and build in the game, and how to improve your Minecraft builds with some tips and tricks. We hope that you have enjoyed reading this article and that you have learned something new about the game.</p> - <h3>A call to action for the readers to download and play the game</h3> -<p>If you are interested in Minecraft Crafting and Building, we encourage you to download the game from Google Play Store and try it out for yourself. You can create your own world with blocks and items, play online with your friends or other players, and have fun building anything you can imagine. You can also share your feedback and suggestions with the developer at protonmobile@gmail.com or leave a review on Google Play Store. 
Thank you for reading this article and happy crafting and building!</p> - <h2>FAQs</h2> -<h3>Q1: Is Minecraft Crafting and Building safe to download and play?</h3> -<p>A1: Yes, Minecraft Crafting and Building is safe to download and play. The game does not contain any viruses, malware, or harmful content. The game also does not require any personal information or permissions from your device. The game is rated E for Everyone by Google Play Store and is suitable for all ages.</p> - <h3>Q2: Can I play Minecraft Crafting and Building with my friends online?</h3> -<p>A2: Yes, you can play Minecraft Crafting and Building with your friends online. The game has a multiplayer mode where you can join or create a server and play online with your friends or other players from around the world. You can visit their worlds, chat with them, help them with their builds, or compete with them in building contests. You can also choose your character's appearance, gender, skin color, hair style, clothes, and accessories.</p> - <h3>Q3: Can I customize my character in Minecraft Crafting and Building?</h3> -<p>A3: Yes, you can customize your character in Minecraft Crafting and Building. The game allows you to choose your character's appearance, gender, skin color, hair style, clothes, and accessories. You can access the character customization menu by tapping on the menu button on the top left corner of the screen and then tapping on the character button. You can then scroll through the different options and select the ones that you like. You can also change your character's name by tapping on the name button and typing your desired name.</p> - <h3>Q4: Can I import or export my builds in Minecraft Crafting and Building?</h3> -<p>A4: Yes, you can import or export your builds in Minecraft Crafting and Building. The game allows you to save your builds as files and share them with others. You can also load other players' builds as files and play with them. You can access the import and export menu by tapping on the menu button on the top left corner of the screen and then tapping on the import/export button. You can then choose to import or export a file from your device or from a URL. You can also scan a QR code to import or export a file.</p> - <h3>Q5: Can I play Minecraft Crafting and Building on other devices besides Android?</h3> -<p>A5: No, Minecraft Crafting and Building is currently only available for Android devices. The game is not compatible with iOS, Windows, Mac, or other platforms. However, the developer might release the game for other devices in the future. 
You can follow the developer's updates and news on their website at https://protonmobile.com/ or on their social media accounts at https://www.facebook.com/protonmobile/ and https://twitter.com/protonmobile/.</p> 197e85843d<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/fffiloni/audioldm-text-to-audio-generation-copy/audioldm/clap/encoders.py b/spaces/fffiloni/audioldm-text-to-audio-generation-copy/audioldm/clap/encoders.py deleted file mode 100644 index 5effd8efd3b933888c586199b5eaa89e632cab03..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/audioldm-text-to-audio-generation-copy/audioldm/clap/encoders.py +++ /dev/null @@ -1,170 +0,0 @@ -import torch -import torch.nn as nn -from audioldm.clap.open_clip import create_model -from audioldm.clap.training.data import get_audio_features -import torchaudio -from transformers import RobertaTokenizer -import torch.nn.functional as F - - -class CLAPAudioEmbeddingClassifierFreev2(nn.Module): - def __init__( - self, - pretrained_path="", - key="class", - sampling_rate=16000, - embed_mode="audio", - amodel = "HTSAT-tiny", - unconditional_prob=0.1, - random_mute=False, - max_random_mute_portion=0.5, - training_mode=True, - ): - super().__init__() - - self.key = key - self.device = "cpu" - self.precision = "fp32" - self.amodel = amodel - self.tmodel = "roberta" # the best text encoder in our training - self.enable_fusion = False # False if you do not want to use the fusion model - self.fusion_type = "aff_2d" - self.pretrained = pretrained_path - self.embed_mode = embed_mode - self.embed_mode_orig = embed_mode - self.sampling_rate = sampling_rate - self.unconditional_prob = unconditional_prob - self.random_mute = random_mute - self.tokenize = RobertaTokenizer.from_pretrained("roberta-base") - self.max_random_mute_portion = max_random_mute_portion - self.training_mode = training_mode - self.model, self.model_cfg = create_model( - self.amodel, - self.tmodel, - self.pretrained, - precision=self.precision, - device=self.device, - enable_fusion=self.enable_fusion, - fusion_type=self.fusion_type, - ) - for p in self.model.parameters(): - p.requires_grad = False - - self.model.eval() - - def get_unconditional_condition(self, batchsize): - self.unconditional_token = self.model.get_text_embedding( - self.tokenizer(["", ""]) - )[0:1] - return torch.cat([self.unconditional_token.unsqueeze(0)] * batchsize, dim=0) - - def batch_to_list(self, batch): - ret = [] - for i in range(batch.size(0)): - ret.append(batch[i]) - return ret - - def make_decision(self, probability): - if float(torch.rand(1)) < probability: - return True - else: - return False - - def random_uniform(self, start, end): - val = torch.rand(1).item() - return start + (end - start) * val - - def _random_mute(self, waveform): - # waveform: [bs, t-steps] - t_steps = waveform.size(-1) - for i in range(waveform.size(0)): - mute_size = int( - self.random_uniform(0, end=int(t_steps * self.max_random_mute_portion)) - ) - mute_start = int(self.random_uniform(0, t_steps - mute_size)) - waveform[i, mute_start : mute_start + mute_size] = 0 - return waveform - - def cos_similarity(self, waveform, text): - # waveform: [bs, t_steps] - with torch.no_grad(): - self.embed_mode = "audio" - audio_emb = self(waveform.cuda()) - self.embed_mode = "text" - text_emb = self(text) - similarity = F.cosine_similarity(audio_emb, text_emb, dim=2) - return similarity.squeeze() - - def forward(self, batch, key=None): - # If you want this conditioner to be unconditional, set self.unconditional_prob 
= 1.0 - # If you want this conditioner to be fully conditional, set self.unconditional_prob = 0.0 - if self.model.training == True and not self.training_mode: - print( - "The pretrained CLAP model should always be in eval mode. Reloading model just in case you change the parameters." - ) - self.model, self.model_cfg = create_model( - self.amodel, - self.tmodel, - self.pretrained, - precision=self.precision, - device="cuda", - enable_fusion=self.enable_fusion, - fusion_type=self.fusion_type, - ) - for p in self.model.parameters(): - p.requires_grad = False - self.model.eval() - - # the 'fusion' truncate mode can be changed to 'rand_trunc' if run in unfusion mode - if self.embed_mode == "audio": - with torch.no_grad(): - audio_dict_list = [] - assert ( - self.sampling_rate == 16000 - ), "We only support 16000 sampling rate" - if self.random_mute: - batch = self._random_mute(batch) - # batch: [bs, 1, t-samples] - batch = torchaudio.functional.resample( - batch, orig_freq=self.sampling_rate, new_freq=48000 - ) - for waveform in self.batch_to_list(batch): - audio_dict = {} - audio_dict = get_audio_features( - audio_dict, - waveform, - 480000, - data_truncating="fusion", - data_filling="repeatpad", - audio_cfg=self.model_cfg["audio_cfg"], - ) - audio_dict_list.append(audio_dict) - # [bs, 512] - embed = self.model.get_audio_embedding(audio_dict_list) - elif self.embed_mode == "text": - with torch.no_grad(): - # the 'fusion' truncate mode can be changed to 'rand_trunc' if run in unfusion mode - text_data = self.tokenizer(batch) - embed = self.model.get_text_embedding(text_data) - - embed = embed.unsqueeze(1) - self.unconditional_token = self.model.get_text_embedding( - self.tokenizer(["", ""]) - )[0:1] - - for i in range(embed.size(0)): - if self.make_decision(self.unconditional_prob): - embed[i] = self.unconditional_token - - # [bs, 1, 512] - return embed.detach() - - def tokenizer(self, text): - result = self.tokenize( - text, - padding="max_length", - truncation=True, - max_length=512, - return_tensors="pt", - ) - return {k: v.squeeze(0) for k, v in result.items()} diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/accepts/README.md b/spaces/fffiloni/controlnet-animation-doodle/node_modules/accepts/README.md deleted file mode 100644 index 82680c530c3886540f630f643990e2ec707319d1..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/accepts/README.md +++ /dev/null @@ -1,140 +0,0 @@ -# accepts - -[![NPM Version][npm-version-image]][npm-url] -[![NPM Downloads][npm-downloads-image]][npm-url] -[![Node.js Version][node-version-image]][node-version-url] -[![Build Status][github-actions-ci-image]][github-actions-ci-url] -[![Test Coverage][coveralls-image]][coveralls-url] - -Higher level content negotiation based on [negotiator](https://www.npmjs.com/package/negotiator). -Extracted from [koa](https://www.npmjs.com/package/koa) for general use. - -In addition to negotiator, it allows: - -- Allows types as an array or arguments list, ie `(['text/html', 'application/json'])` - as well as `('text/html', 'application/json')`. -- Allows type shorthands such as `json`. -- Returns `false` when no types match -- Treats non-existent headers as `*` - -## Installation - -This is a [Node.js](https://nodejs.org/en/) module available through the -[npm registry](https://www.npmjs.com/). 
Installation is done using the -[`npm install` command](https://docs.npmjs.com/getting-started/installing-npm-packages-locally): - -```sh -$ npm install accepts -``` - -## API - -```js -var accepts = require('accepts') -``` - -### accepts(req) - -Create a new `Accepts` object for the given `req`. - -#### .charset(charsets) - -Return the first accepted charset. If nothing in `charsets` is accepted, -then `false` is returned. - -#### .charsets() - -Return the charsets that the request accepts, in the order of the client's -preference (most preferred first). - -#### .encoding(encodings) - -Return the first accepted encoding. If nothing in `encodings` is accepted, -then `false` is returned. - -#### .encodings() - -Return the encodings that the request accepts, in the order of the client's -preference (most preferred first). - -#### .language(languages) - -Return the first accepted language. If nothing in `languages` is accepted, -then `false` is returned. - -#### .languages() - -Return the languages that the request accepts, in the order of the client's -preference (most preferred first). - -#### .type(types) - -Return the first accepted type (and it is returned as the same text as what -appears in the `types` array). If nothing in `types` is accepted, then `false` -is returned. - -The `types` array can contain full MIME types or file extensions. Any value -that is not a full MIME type is passed to `require('mime-types').lookup`. - -#### .types() - -Return the types that the request accepts, in the order of the client's -preference (most preferred first). - -## Examples - -### Simple type negotiation - -This simple example shows how to use `accepts` to return a differently typed -response body based on what the client wants to accept. The server lists its -preferences in order and will get back the best match between the client and -server.
- -```js -var accepts = require('accepts') -var http = require('http') - -function app (req, res) { - var accept = accepts(req) - - // the order of this list is significant; should be server preferred order - switch (accept.type(['json', 'html'])) { - case 'json': - res.setHeader('Content-Type', 'application/json') - res.write('{"hello":"world!"}') - break - case 'html': - res.setHeader('Content-Type', 'text/html') - res.write('<b>hello, world!</b>') - break - default: - // the fallback is text/plain, so no need to specify it above - res.setHeader('Content-Type', 'text/plain') - res.write('hello, world!') - break - } - - res.end() -} - -http.createServer(app).listen(3000) -``` - -You can test this out with the cURL program: -```sh -curl -I -H'Accept: text/html' http://localhost:3000/ -``` - -## License - -[MIT](LICENSE) - -[coveralls-image]: https://badgen.net/coveralls/c/github/jshttp/accepts/master -[coveralls-url]: https://coveralls.io/r/jshttp/accepts?branch=master -[github-actions-ci-image]: https://badgen.net/github/checks/jshttp/accepts/master?label=ci -[github-actions-ci-url]: https://github.com/jshttp/accepts/actions/workflows/ci.yml -[node-version-image]: https://badgen.net/npm/node/accepts -[node-version-url]: https://nodejs.org/en/download -[npm-downloads-image]: https://badgen.net/npm/dm/accepts -[npm-url]: https://npmjs.org/package/accepts -[npm-version-image]: https://badgen.net/npm/v/accepts diff --git a/spaces/firestalker/anime-tts/attentions.py b/spaces/firestalker/anime-tts/attentions.py deleted file mode 100644 index 86bc73b5fe98cc7b443e9078553920346c996707..0000000000000000000000000000000000000000 --- a/spaces/firestalker/anime-tts/attentions.py +++ /dev/null @@ -1,300 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -from modules import LayerNorm - - -class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = 
n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, 
t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert t_s == t_t, "Local attention is only available for self-attention." - block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]])) - - # Reshape and slice out the padded elements. 
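# Shape walk-through of the view/slice below, which implements the usual
# "skewing" trick for relative attention:
#   [b, h, l, 2*l-1]  --pad last dim by 1-->  [b, h, l, 2*l]
#   --flatten-->  [b, h, 2*l*l]  --pad by l-1-->  [b, h, 2*l*l + l - 1]
#   --view as [b, h, l+1, 2*l-1], keep first l rows and last l columns-->  [b, h, l, l]
# so each query position ends up aligned with absolute key positions.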
- x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) - x_flat = x.view([batch, heads, length**2 + length*(length -1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. - Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/flax-community/DietNerf-Demo/jaxnerf/run.sh b/spaces/flax-community/DietNerf-Demo/jaxnerf/run.sh deleted file mode 100644 index 9265970f3be96e04fa095e8821d879f09cbe7009..0000000000000000000000000000000000000000 --- a/spaces/flax-community/DietNerf-Demo/jaxnerf/run.sh +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright 2021 The Google Research Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -#!/bin/bash -set -e -set -x - -virtualenv -p python3 . 
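# The remaining steps activate the fresh virtualenv, install the JAXNeRF requirements,
# swap the default jax wheel for the TPU build, and launch a 5-step training run on the
# lego scene as a quick sanity check.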
-source ./bin/activate - -pip install -r jaxnerf/requirements.txt -pip uninstall jax -pip install --upgrade pip -pip install "jax[tpu]>=0.2.16" -f https://storage.googleapis.com/jax-releases/libtpu_releases.html -python -m jaxnerf.train \ - --data_dir=/mnt/data/NeRF_Data/nerf_synthetic/lego \ - --train_dir=test_output \ - --max_steps=5 \ - --factor=2 \ - --batch_size=512 \ - --config=configs/orig_nerf_tpu_vm_test \ - --precompute_pkl_path /mnt/data/NeRF_Data/nerf_synthetic/lego/clip_cache_train_factor4_float32.pkl diff --git a/spaces/flax-community/Multilingual-VQA/sections/checkpoints/other_checkpoints.md b/spaces/flax-community/Multilingual-VQA/sections/checkpoints/other_checkpoints.md deleted file mode 100644 index c4546aed7803ba08f1784ea883ae4693b9bcb934..0000000000000000000000000000000000000000 --- a/spaces/flax-community/Multilingual-VQA/sections/checkpoints/other_checkpoints.md +++ /dev/null @@ -1,6 +0,0 @@ -- All pre-trained checkpoints: [multilingual-vqa](https://huggingface.co/flax-community/multilingual-vqa) -- Fine-tuned checkpoints on 45k pre-trained checkpoint: [multilingual-vqa-pt-45k-ft](https://huggingface.co/flax-community/multilingual-vqa-pt-45k-ft) -- Fine-tuned checkpoints on 45k pre-trained checkpoint with AdaFactor (others use AdamW): [multilingual-vqa-pt-45k-ft-adf](https://huggingface.co/flax-community/multilingual-vqa-pt-45k-ft-adf) -- Fine-tuned checkpoints on 60k pre-trained checkpoint: [multilingual-vqa-pt-60k-ft](https://huggingface.co/flax-community/multilingual-vqa-pt-60k-ft) -- Fine-tuned checkpoints on 70k pre-trained checkpoint: [multilingual-vqa-pt-60k-ft](https://huggingface.co/flax-community/multilingual-vqa-pt-70k-ft) -- From scratch (without MLM pre-training) model: [multilingual-vqa-ft](https://huggingface.co/flax-community/multilingual-vqa-ft) \ No newline at end of file diff --git a/spaces/flax-community/multilingual-image-captioning/apps/model/flax_clip_vision_mbart/modeling_clip_vision_utils.py b/spaces/flax-community/multilingual-image-captioning/apps/model/flax_clip_vision_mbart/modeling_clip_vision_utils.py deleted file mode 100644 index 2c85df25c4dd9a311e4e01f8a90621feab9db539..0000000000000000000000000000000000000000 --- a/spaces/flax-community/multilingual-image-captioning/apps/model/flax_clip_vision_mbart/modeling_clip_vision_utils.py +++ /dev/null @@ -1,451 +0,0 @@ -# NEW - -import os - -# from functools import partial -from pickle import UnpicklingError -from typing import Dict, Set, Tuple, Union - -import flax.linen as nn -import jax -import jax.numpy as jnp -from flax.core.frozen_dict import FrozenDict, unfreeze -from flax.serialization import from_bytes, to_bytes -from flax.traverse_util import flatten_dict, unflatten_dict -from jax.random import PRNGKey -from transformers.configuration_utils import PretrainedConfig -from transformers.file_utils import ( - FLAX_WEIGHTS_NAME, - WEIGHTS_NAME, - PushToHubMixin, - cached_path, - hf_bucket_url, - is_offline_mode, - is_remote_url, -) -from transformers.modeling_flax_pytorch_utils import ( - load_pytorch_checkpoint_in_flax_state_dict, -) -from transformers.utils import logging - -from .generation_clip_vision_utils import FlaxCLIPVisionMBartGenerationMixin - -logger = logging.get_logger(__name__) - - -class FlaxCLIPVisionMBartPreTrainedModel( - PushToHubMixin, FlaxCLIPVisionMBartGenerationMixin -): - r""" - Base class for all models. 
- :class:`~transformers.FlaxPreTrainedModel` takes care of storing the configuration of the models and handles - methods for loading, downloading and saving models. - Class attributes (overridden by derived classes): - - **config_class** (:class:`~transformers.PretrainedConfig`) -- A subclass of - :class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture. - - **base_model_prefix** (:obj:`str`) -- A string indicating the attribute associated to the base model in - derived classes of the same architecture adding modules on top of the base model. - """ - config_class = None - base_model_prefix = "" - - def __init__( - self, - config: PretrainedConfig, - module: nn.Module, - input_shape: Tuple = (1, 1), - seed: int = 0, - dtype: jnp.dtype = jnp.float32, - ): - if config is None: - raise ValueError("config cannot be None") - - if module is None: - raise ValueError("module cannot be None") - - # Those are private to be exposed as typed property on derived classes. - self._config = config - self._module = module - - # Those are public as their type is generic to every derived classes. - self.key = PRNGKey(seed) - self.dtype = dtype - - # randomly initialized parameters - random_params = self.init_weights(self.key, input_shape) - - # save required_params as set - self._required_params = set(flatten_dict(unfreeze(random_params)).keys()) - self.params = random_params - - def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple) -> Dict: - raise NotImplementedError(f"init method has to be implemented for {self}") - - @classmethod - def _from_config(cls, config, **kwargs): - """ - All context managers that the model should be initialized under go here. - """ - return cls(config, **kwargs) - - @property - def config(self) -> PretrainedConfig: - return self._config - - @property - def module(self) -> nn.Module: - return self._module - - @property - def params(self) -> Union[Dict, FrozenDict]: - return self._params - - @property - def required_params(self) -> Set: - return self._required_params - - @params.setter - def params(self, params: Union[Dict, FrozenDict]): - if isinstance(params, FrozenDict): - params = unfreeze(params) - param_keys = set(flatten_dict(params).keys()) - if len(self.required_params - param_keys) > 0: - raise ValueError( - "Some parameters are missing. Make sure that `params` include the following " - f"parameters {self.required_params - param_keys}" - ) - self._params = params - - @classmethod - def from_pretrained( - cls, - pretrained_model_name_or_path: Union[str, os.PathLike], - dtype: jnp.dtype = jnp.float32, - *model_args, - **kwargs, - ): - - r""" - Instantiate a pretrained flax model from a pre-trained model configuration. - The warning `Weights from XXX not initialized from pretrained model` means that the weights of XXX do not come - pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning - task. - The warning `Weights from XXX not used in YYY` means that the layer XXX is not used by YYY, therefore those - weights are discarded. - Parameters: - pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`): - Can be either: - - A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co. - Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under - a user or organization name, like ``dbmdz/bert-base-german-cased``. 
- - A path to a `directory` containing model weights saved using - :func:`~transformers.FlaxPreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``. - - A path or url to a `pt index checkpoint file` (e.g, ``./tf_model/model.ckpt.index``). In this - case, ``from_pt`` should be set to :obj:`True`. - model_args (sequence of positional arguments, `optional`): - All remaning positional arguments will be passed to the underlying model's ``__init__`` method. - config (:obj:`Union[PretrainedConfig, str, os.PathLike]`, `optional`): - Can be either: - - an instance of a class derived from :class:`~transformers.PretrainedConfig`, - - a string or path valid as input to :func:`~transformers.PretrainedConfig.from_pretrained`. - Configuration for the model to use instead of an automatically loaded configuation. Configuration can - be automatically loaded when: - - The model is a model provided by the library (loaded with the `model id` string of a pretrained - model). - - The model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded - by supplying the save directory. - - The model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a - configuration JSON file named `config.json` is found in the directory. - cache_dir (:obj:`Union[str, os.PathLike]`, `optional`): - Path to a directory in which a downloaded pretrained model configuration should be cached if the - standard cache should not be used. - from_pt (:obj:`bool`, `optional`, defaults to :obj:`False`): - Load the model weights from a PyTorch checkpoint save file (see docstring of - ``pretrained_model_name_or_path`` argument). - force_download (:obj:`bool`, `optional`, defaults to :obj:`False`): - Whether or not to force the (re-)download of the model weights and configuration files, overriding the - cached versions if they exist. - resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`): - Whether or not to delete incompletely received files. Will attempt to resume the download if such a - file exists. - proxies (:obj:`Dict[str, str], `optional`): - A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128', - 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. - local_files_only(:obj:`bool`, `optional`, defaults to :obj:`False`): - Whether or not to only look at local files (i.e., do not try to download the model). - revision(:obj:`str`, `optional`, defaults to :obj:`"main"`): - The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a - git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any - identifier allowed by git. - kwargs (remaining dictionary of keyword arguments, `optional`): - Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., - :obj:`output_attentions=True`). Behaves differently depending on whether a ``config`` is provided or - automatically loaded: - - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the - underlying model's ``__init__`` method (we assume all relevant updates to the configuration have - already been done) - - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class - initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). 
Each key of - ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute - with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration - attribute will be passed to the underlying model's ``__init__`` function. - Examples:: - >>> from transformers import BertConfig, FlaxBertModel - >>> # Download model and configuration from huggingface.co and cache. - >>> model = FlaxBertModel.from_pretrained('bert-base-cased') - >>> # Model was saved using `save_pretrained('./test/saved_model/')` (for example purposes, not runnable). - >>> model = FlaxBertModel.from_pretrained('./test/saved_model/') - >>> # Loading from a PyTorch checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable). - >>> config = BertConfig.from_json_file('./pt_model/config.json') - >>> model = FlaxBertModel.from_pretrained('./pt_model/pytorch_model.bin', from_pt=True, config=config) - """ - config = kwargs.pop("config", None) - cache_dir = kwargs.pop("cache_dir", None) - from_pt = kwargs.pop("from_pt", False) - force_download = kwargs.pop("force_download", False) - resume_download = kwargs.pop("resume_download", False) - proxies = kwargs.pop("proxies", None) - local_files_only = kwargs.pop("local_files_only", False) - use_auth_token = kwargs.pop("use_auth_token", None) - revision = kwargs.pop("revision", None) - from_pipeline = kwargs.pop("_from_pipeline", None) - from_auto_class = kwargs.pop("_from_auto", False) - - user_agent = { - "file_type": "model", - "framework": "flax", - "from_auto_class": from_auto_class, - } - if from_pipeline is not None: - user_agent["using_pipeline"] = from_pipeline - - if is_offline_mode() and not local_files_only: - logger.info("Offline mode: forcing local_files_only=True") - local_files_only = True - - # Load config if we don't provide a configuration - if not isinstance(config, PretrainedConfig): - config_path = ( - config if config is not None else pretrained_model_name_or_path - ) - config, model_kwargs = cls.config_class.from_pretrained( - config_path, - *model_args, - cache_dir=cache_dir, - return_unused_kwargs=True, - force_download=force_download, - resume_download=resume_download, - proxies=proxies, - local_files_only=local_files_only, - use_auth_token=use_auth_token, - revision=revision, - _from_auto=from_auto_class, - _from_pipeline=from_pipeline, - **kwargs, - ) - else: - model_kwargs = kwargs - - # Add the dtype to model_kwargs - model_kwargs["dtype"] = dtype - - # Load model - if pretrained_model_name_or_path is not None: - if os.path.isdir(pretrained_model_name_or_path): - if from_pt and os.path.isfile( - os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME) - ): - # Load from a PyTorch checkpoint - archive_file = os.path.join( - pretrained_model_name_or_path, WEIGHTS_NAME - ) - elif os.path.isfile( - os.path.join(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME) - ): - # Load from a Flax checkpoint - archive_file = os.path.join( - pretrained_model_name_or_path, FLAX_WEIGHTS_NAME - ) - else: - raise EnvironmentError( - f"Error no file named {[FLAX_WEIGHTS_NAME, WEIGHTS_NAME]} found in directory " - f"{pretrained_model_name_or_path} or `from_pt` set to False" - ) - elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url( - pretrained_model_name_or_path - ): - archive_file = pretrained_model_name_or_path - else: - archive_file = hf_bucket_url( - pretrained_model_name_or_path, - filename=WEIGHTS_NAME if from_pt else FLAX_WEIGHTS_NAME, - revision=revision, - ) - - # 
redirect to the cache, if necessary - try: - resolved_archive_file = cached_path( - archive_file, - cache_dir=cache_dir, - force_download=force_download, - proxies=proxies, - resume_download=resume_download, - local_files_only=local_files_only, - use_auth_token=use_auth_token, - user_agent=user_agent, - ) - except EnvironmentError as err: - logger.error(err) - msg = ( - f"Can't load weights for '{pretrained_model_name_or_path}'. Make sure that:\n\n" - f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n" - f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named {WEIGHTS_NAME}.\n\n" - ) - raise EnvironmentError(msg) - - if resolved_archive_file == archive_file: - logger.info(f"loading weights file {archive_file}") - else: - logger.info( - f"loading weights file {archive_file} from cache at {resolved_archive_file}" - ) - else: - resolved_archive_file = None - - # init random models - model = cls(config, *model_args, **model_kwargs) - - if from_pt: - state = load_pytorch_checkpoint_in_flax_state_dict( - model, resolved_archive_file - ) - else: - with open(resolved_archive_file, "rb") as state_f: - try: - state = from_bytes(cls, state_f.read()) - except UnpicklingError: - raise EnvironmentError( - f"Unable to convert {archive_file} to Flax deserializable object. " - ) - # make sure all arrays are stored as jnp.arrays - # NOTE: This is to prevent a bug this will be fixed in Flax >= v0.3.4: - # https://github.com/google/flax/issues/1261 - state = jax.tree_util.tree_map(jnp.array, state) - - # if model is base model only use model_prefix key - if ( - cls.base_model_prefix not in dict(model.params) - and cls.base_model_prefix in state - ): - state = state[cls.base_model_prefix] - - # if model is head model and we are loading weights from base model - # we initialize new params dict with base_model_prefix - if ( - cls.base_model_prefix in dict(model.params) - and cls.base_model_prefix not in state - ): - state = {cls.base_model_prefix: state} - - # flatten dicts - state = flatten_dict(state) - - random_state = flatten_dict(unfreeze(model.params)) - - missing_keys = model.required_params - set(state.keys()) - unexpected_keys = set(state.keys()) - model.required_params - - # add missing keys as random parameters - for missing_key in missing_keys: - state[missing_key] = random_state[missing_key] - - # remove unexpected keys to not be saved again - for unexpected_key in unexpected_keys: - del state[unexpected_key] - - if len(unexpected_keys) > 0: - logger.warning( - f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when " - f"initializing {model.__class__.__name__}: {unexpected_keys}\n" - f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task " - f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n" - f"- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect " - f"to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)." 
- ) - else: - logger.info( - f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n" - ) - - if len(missing_keys) > 0: - logger.warning( - f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} " - f"and are newly initialized: {missing_keys}\n" - f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference." - ) - else: - logger.info( - f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\n" - f"If your task is similar to the task the model of the checkpoint was trained on, " - f"you can already use {model.__class__.__name__} for predictions without further training." - ) - - # set correct parameters - model.params = unflatten_dict(state) - - return model - - def save_pretrained( - self, - save_directory: Union[str, os.PathLike], - params=None, - push_to_hub=False, - **kwargs, - ): - """ - Save a model and its configuration file to a directory, so that it can be re-loaded using the - `:func:`~transformers.FlaxPreTrainedModel.from_pretrained`` class method - Arguments: - save_directory (:obj:`str` or :obj:`os.PathLike`): - Directory to which to save. Will be created if it doesn't exist. - push_to_hub (:obj:`bool`, `optional`, defaults to :obj:`False`): - Whether or not to push your model to the Hugging Face model hub after saving it. - .. warning:: - Using :obj:`push_to_hub=True` will synchronize the repository you are pushing to with - :obj:`save_directory`, which requires :obj:`save_directory` to be a local clone of the repo you are - pushing to if it's an existing folder. Pass along :obj:`temp_dir=True` to use a temporary directory - instead. - kwargs: - Additional key word arguments passed along to the - :meth:`~transformers.file_utils.PushToHubMixin.push_to_hub` method. 
- """ - if os.path.isfile(save_directory): - logger.error( - f"Provided path ({save_directory}) should be a directory, not a file" - ) - return - - if push_to_hub: - commit_message = kwargs.pop("commit_message", None) - repo = self._create_or_get_repo(save_directory, **kwargs) - - os.makedirs(save_directory, exist_ok=True) - - # get abs dir - save_directory = os.path.abspath(save_directory) - # save config as well - self.config.architectures = [self.__class__.__name__[4:]] - self.config.save_pretrained(save_directory) - - # save model - output_model_file = os.path.join(save_directory, FLAX_WEIGHTS_NAME) - with open(output_model_file, "wb") as f: - params = params if params is not None else self.params - model_bytes = to_bytes(params) - f.write(model_bytes) - - logger.info(f"Model weights saved in {output_model_file}") - - if push_to_hub: - url = self._push_to_hub(repo, commit_message=commit_message) - logger.info(f"Model pushed to the hub in this commit: {url}") diff --git a/spaces/florim/MedGPT/autogpt/app.py b/spaces/florim/MedGPT/autogpt/app.py deleted file mode 100644 index 58d9f7164ddfbb5019b072d789dc2fa6205dc9d3..0000000000000000000000000000000000000000 --- a/spaces/florim/MedGPT/autogpt/app.py +++ /dev/null @@ -1,330 +0,0 @@ -""" Command and Control """ -import json -from typing import Dict, List, NoReturn, Union - -from autogpt.agent.agent_manager import AgentManager -from autogpt.commands.analyze_code import analyze_code -from autogpt.commands.audio_text import read_audio_from_file -from autogpt.commands.execute_code import ( - execute_python_file, - execute_shell, - execute_shell_popen, -) -from autogpt.commands.file_operations import ( - append_to_file, - delete_file, - download_file, - read_file, - search_files, - write_to_file, -) -from autogpt.commands.git_operations import clone_repository -from autogpt.commands.google_search import google_official_search, google_search -from autogpt.commands.image_gen import generate_image -from autogpt.commands.improve_code import improve_code -from autogpt.commands.twitter import send_tweet -from autogpt.commands.web_requests import scrape_links, scrape_text -from autogpt.commands.web_selenium import browse_website -from autogpt.commands.write_tests import write_tests -from autogpt.config import Config -from autogpt.json_utils.json_fix_llm import fix_and_parse_json -from autogpt.memory import get_memory -from autogpt.processing.text import summarize_text -from autogpt.speech import say_text - -CFG = Config() -AGENT_MANAGER = AgentManager() - - -def is_valid_int(value: str) -> bool: - """Check if the value is a valid integer - - Args: - value (str): The value to check - - Returns: - bool: True if the value is a valid integer, False otherwise - """ - try: - int(value) - return True - except ValueError: - return False - - -def get_command(response_json: Dict): - """Parse the response and return the command name and arguments - - Args: - response_json (json): The response from the AI - - Returns: - tuple: The command name and arguments - - Raises: - json.decoder.JSONDecodeError: If the response is not valid JSON - - Exception: If any other error occurs - """ - try: - if "command" not in response_json: - return "Error:", "Missing 'command' object in JSON" - - if not isinstance(response_json, dict): - return "Error:", f"'response_json' object is not dictionary {response_json}" - - command = response_json["command"] - if not isinstance(command, dict): - return "Error:", "'command' object is not a dictionary" - - if "name" not in command: - 
return "Error:", "Missing 'name' field in 'command' object" - - command_name = command["name"] - - # Use an empty dictionary if 'args' field is not present in 'command' object - arguments = command.get("args", {}) - - return command_name, arguments - except json.decoder.JSONDecodeError: - return "Error:", "Invalid JSON" - # All other errors, return "Error: + error message" - except Exception as e: - return "Error:", str(e) - - -def map_command_synonyms(command_name: str): - """Takes the original command name given by the AI, and checks if the - string matches a list of common/known hallucinations - """ - synonyms = [ - ("write_file", "write_to_file"), - ("create_file", "write_to_file"), - ("search", "google"), - ] - for seen_command, actual_command_name in synonyms: - if command_name == seen_command: - return actual_command_name - return command_name - - -def execute_command(command_name: str, arguments): - """Execute the command and return the result - - Args: - command_name (str): The name of the command to execute - arguments (dict): The arguments for the command - - Returns: - str: The result of the command - """ - try: - command_name = map_command_synonyms(command_name.lower()) - if command_name == "google": - # Check if the Google API key is set and use the official search method - # If the API key is not set or has only whitespaces, use the unofficial - # search method - key = CFG.google_api_key - if key and key.strip() and key != "your-google-api-key": - google_result = google_official_search(arguments["input"]) - return google_result - else: - google_result = google_search(arguments["input"]) - - # google_result can be a list or a string depending on the search results - if isinstance(google_result, list): - safe_message = [ - google_result_single.encode("utf-8", "ignore") - for google_result_single in google_result - ] - else: - safe_message = google_result.encode("utf-8", "ignore") - - return safe_message.decode("utf-8") - elif command_name == "memory_add": - memory = get_memory(CFG) - return memory.add(arguments["string"]) - elif command_name == "start_agent": - return start_agent( - arguments["name"], arguments["task"], arguments["prompt"] - ) - elif command_name == "message_agent": - return message_agent(arguments["key"], arguments["message"]) - elif command_name == "list_agents": - return list_agents() - elif command_name == "delete_agent": - return delete_agent(arguments["key"]) - elif command_name == "get_text_summary": - return get_text_summary(arguments["url"], arguments["question"]) - elif command_name == "get_hyperlinks": - return get_hyperlinks(arguments["url"]) - elif command_name == "clone_repository": - return clone_repository( - arguments["repository_url"], arguments["clone_path"] - ) - elif command_name == "read_file": - return read_file(arguments["file"]) - elif command_name == "write_to_file": - return write_to_file(arguments["file"], arguments["text"]) - elif command_name == "append_to_file": - return append_to_file(arguments["file"], arguments["text"]) - elif command_name == "delete_file": - return delete_file(arguments["file"]) - elif command_name == "search_files": - return search_files(arguments["directory"]) - elif command_name == "download_file": - if not CFG.allow_downloads: - return "Error: You do not have user authorization to download files locally." 
- return download_file(arguments["url"], arguments["file"]) - elif command_name == "browse_website": - return browse_website(arguments["url"], arguments["question"]) - # TODO: Change these to take in a file rather than pasted code, if - # non-file is given, return instructions "Input should be a python - # filepath, write your code to file and try again" - elif command_name == "analyze_code": - return analyze_code(arguments["code"]) - elif command_name == "improve_code": - return improve_code(arguments["suggestions"], arguments["code"]) - elif command_name == "write_tests": - return write_tests(arguments["code"], arguments.get("focus")) - elif command_name == "execute_python_file": # Add this command - return execute_python_file(arguments["file"]) - elif command_name == "execute_shell": - if CFG.execute_local_commands: - return execute_shell(arguments["command_line"]) - else: - return ( - "You are not allowed to run local shell commands. To execute" - " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' " - "in your config. Do not attempt to bypass the restriction." - ) - elif command_name == "execute_shell_popen": - if CFG.execute_local_commands: - return execute_shell_popen(arguments["command_line"]) - else: - return ( - "You are not allowed to run local shell commands. To execute" - " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' " - "in your config. Do not attempt to bypass the restriction." - ) - elif command_name == "read_audio_from_file": - return read_audio_from_file(arguments["file"]) - elif command_name == "generate_image": - return generate_image(arguments["prompt"]) - elif command_name == "send_tweet": - return send_tweet(arguments["text"]) - elif command_name == "do_nothing": - return "No action performed." - elif command_name == "task_complete": - shutdown() - else: - return ( - f"Unknown command '{command_name}'. Please refer to the 'COMMANDS'" - " list for available commands and only respond in the specified JSON" - " format." - ) - except Exception as e: - return f"Error: {str(e)}" - - -def get_text_summary(url: str, question: str) -> str: - """Return the results of a Google search - - Args: - url (str): The url to scrape - question (str): The question to summarize the text for - - Returns: - str: The summary of the text - """ - text = scrape_text(url) - summary = summarize_text(url, text, question) - return f""" "Result" : {summary}""" - - -def get_hyperlinks(url: str) -> Union[str, List[str]]: - """Return the results of a Google search - - Args: - url (str): The url to scrape - - Returns: - str or list: The hyperlinks on the page - """ - return scrape_links(url) - - -def shutdown() -> NoReturn: - """Shut down the program""" - print("Shutting down...") - quit() - - -def start_agent(name: str, task: str, prompt: str, model=CFG.fast_llm_model) -> str: - """Start an agent with a given name, task, and prompt - - Args: - name (str): The name of the agent - task (str): The task of the agent - prompt (str): The prompt for the agent - model (str): The model to use for the agent - - Returns: - str: The response of the agent - """ - # Remove underscores from name - voice_name = name.replace("_", " ") - - first_message = f"""You are {name}. Respond with: "Acknowledged".""" - agent_intro = f"{voice_name} here, Reporting for duty!" - - # Create agent - if CFG.speak_mode: - say_text(agent_intro, 1) - key, ack = AGENT_MANAGER.create_agent(task, first_message, model) - - if CFG.speak_mode: - say_text(f"Hello {voice_name}. Your task is as follows. 
{task}.") - - # Assign task (prompt), get response - agent_response = AGENT_MANAGER.message_agent(key, prompt) - - return f"Agent {name} created with key {key}. First response: {agent_response}" - - -def message_agent(key: str, message: str) -> str: - """Message an agent with a given key and message""" - # Check if the key is a valid integer - if is_valid_int(key): - agent_response = AGENT_MANAGER.message_agent(int(key), message) - else: - return "Invalid key, must be an integer." - - # Speak response - if CFG.speak_mode: - say_text(agent_response, 1) - return agent_response - - -def list_agents(): - """List all agents - - Returns: - str: A list of all agents - """ - return "List of agents:\n" + "\n".join( - [str(x[0]) + ": " + x[1] for x in AGENT_MANAGER.list_agents()] - ) - - -def delete_agent(key: str) -> str: - """Delete an agent with a given key - - Args: - key (str): The key of the agent to delete - - Returns: - str: A message indicating whether the agent was deleted or not - """ - result = AGENT_MANAGER.delete_agent(key) - return f"Agent {key} deleted." if result else f"Agent {key} does not exist." diff --git a/spaces/fsdl2022emotion/meme-manipulation-gradio-space/app.py b/spaces/fsdl2022emotion/meme-manipulation-gradio-space/app.py deleted file mode 100644 index adcd03581d3b0b04be3acf7d2b7d36deb95a23b5..0000000000000000000000000000000000000000 --- a/spaces/fsdl2022emotion/meme-manipulation-gradio-space/app.py +++ /dev/null @@ -1,115 +0,0 @@ -from functools import partial -import argparse - -import gradio as gr -import numpy as np - -from emotion_synthesizer.emotion_synthesis import EmotionSynthesizer -from drawer.simple_draw import add_text - - -DEFAULT_MODEL_PATH = "./emotion_synthesizer/learned_generators/gaus_2d/1800000-G.ckpt" -DEFAULT_MODEL_TYPE = "gaussian" - - -def make_meme(original_image, new_emotion, secondary_emotion=None, intensity=None, text=None, wandb_artifact=None): - # workaround for gradio bug (!?) 
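    # gr.Radio appears to pass the string "None" or an empty string when the optional
    # secondary emotion is left unselected, so normalize both to a real Python None.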
- secondary_emotion = None if (secondary_emotion == "None" or secondary_emotion == "") else secondary_emotion - print(f"Secondary emotion: {secondary_emotion}") - - if wandb_artifact: - artifact_dir = artifact.download() - artifact_path = f"{artifact_dir}/1800000-G.ckpt" - model_type = artifact.metadata["model_type"] - model = EmotionSynthesizer(model_path=artifact_path, model_type=model_type) - else: - model = EmotionSynthesizer(DEFAULT_MODEL_PATH, DEFAULT_MODEL_TYPE) - try: - generated_image = model.predict(original_image, new_emotion, secondary_emotion, intensity) - except: - raise gr.Error(f"Cannot generate emotion {new_emotion} from the input image.") - - if text: - print(f"Adding text: {text}") - output_image = add_text(generated_image, text) - return output_image - - return generated_image - -def face_to_face(x): - return x - - -if __name__ == "__main__": - - parser = argparse.ArgumentParser() - parser.add_argument("--use_wandb", action="store_true", help="Use wandb artifact", default=False) - args = parser.parse_args() - use_wandb = args.use_wandb - - if use_wandb: - print(f"Using wandb artifact") - import wandb - run = wandb.init( - project="fsdl2022-emotion", - job_type='use-model', - entity="fsdl22", - ) - artifact = run.use_artifact("ganmut-model:production") - meme_app = partial(make_meme, wandb_artifact=artifact) - else: - print(f"Using default model: {DEFAULT_MODEL_PATH}") - meme_app = make_meme - - - with gr.Blocks( - title="MEME Manipulation Tool", - css =""" - .gradio-container {background-image: url('file=assets/wallpaper.png');background-repeat: no-repeat; background-size: cover;} - """ - ) as demo: - with gr.Accordion("About"): - gr.Markdown(""" - MEME Emotion Manipulation Tool is an open source project of the [Full Stack Deep Learning](https://fullstackdeeplearning.com) course.<br> - It is a tool that allows you to manipulate the emotions of a person in a photo. You can also add text on the image to create a meme. We will not save any input from the users. <br> - This tool used the pretrained model and is modified based on the [GANmut Model](https://github.com/stefanodapolito/GANmut). 
- You can view the source code of this tool in [GitHub](https://github.com/fsdl2022emotion/meme-manipulation-app) and [Gradio Space](https://huggingface.co/spaces/fsdl2022emotion/meme-manipulation-gradio-space) and give it a star if you like it!<br> - """) - with gr.Tab("Change emotion"): - with gr.Row(): - with gr.Column(): - emtion_image_input = gr.Image() - emotion_text_input = gr.Radio(["happy", "fear", "sad", "angry", "disgust", "surprise", "neutral"], label="Primary Emotion (Required)") - emotion_text_input2 = gr.Radio(["happy", "fear", "sad", "angry", "disgust", "surprise", "neutral"], label="Secondary Emotion (Optional)", value=None) - intensity = gr.Slider(0, 1, label="Intensity (Valid only if secondary emotion is selected)") - meme_text_input = gr.Textbox(lines=1, label="Meme text (Optional)") - change_emotion_button = gr.Button("Change emotion") - with gr.Row(scale=1): - emotion_image_output = gr.Image() - change_emotion_button.click(meme_app, inputs=[emtion_image_input, emotion_text_input, emotion_text_input2, intensity, meme_text_input], outputs=emotion_image_output, ) - - ############################# only show on demo day ############################# - with gr.Tab("original-image"): - with gr.Row(): - image_input = gr.Image() - image_output = gr.Image() - image_button = gr.Button("Convert") - image_button.click(face_to_face, inputs=image_input, outputs=image_output) - ################################################################################## - with gr.Accordion("Valid Mapping"): - gr.Markdown(""" - You can use purely the primary emotion or combine it with the secondary emotion for image generation. <br> - Yet only some of the combinations are valid. Please refer to the below mapping: <br> - ![valid mapping](https://i.ibb.co/5rCXgfB/Screenshot-2022-10-14-at-11-59-28-AM.png) - """) - gr.Examples(examples=[ - ["examples/charles-frye.jpeg", "surprise", "", 1.0, "When I got a new idea"], - ["examples/sergey.jpg", "neutral", "", 1.0, "I did smile"], - ["examples/josh.jpg", "angry", "", 1.0, "nasdaq index"], - ], - inputs=[emtion_image_input, emotion_text_input, emotion_text_input2, intensity, meme_text_input], - fn=meme_app, - ) - - - demo.launch(favicon_path="./assets/favicon.png") \ No newline at end of file diff --git a/spaces/fuloo/newbing/README.md b/spaces/fuloo/newbing/README.md deleted file mode 100644 index efac4191467e3c01254eef46d75c81cb1d183f0d..0000000000000000000000000000000000000000 --- a/spaces/fuloo/newbing/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Newbing -emoji: 🔥 -colorFrom: pink -colorTo: pink -sdk: docker -pinned: false -license: mit -app_port: 8080 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/georgefen/Face-Landmark-ControlNet/annotator/mlsd/utils.py b/spaces/georgefen/Face-Landmark-ControlNet/annotator/mlsd/utils.py deleted file mode 100644 index ae3cf9420a33a4abae27c48ac4b90938c7d63cc3..0000000000000000000000000000000000000000 --- a/spaces/georgefen/Face-Landmark-ControlNet/annotator/mlsd/utils.py +++ /dev/null @@ -1,580 +0,0 @@ -''' -modified by lihaoweicv -pytorch version -''' - -''' -M-LSD -Copyright 2021-present NAVER Corp. 
-Apache License v2.0 -''' - -import os -import numpy as np -import cv2 -import torch -from torch.nn import functional as F - - -def deccode_output_score_and_ptss(tpMap, topk_n = 200, ksize = 5): - ''' - tpMap: - center: tpMap[1, 0, :, :] - displacement: tpMap[1, 1:5, :, :] - ''' - b, c, h, w = tpMap.shape - assert b==1, 'only support bsize==1' - displacement = tpMap[:, 1:5, :, :][0] - center = tpMap[:, 0, :, :] - heat = torch.sigmoid(center) - hmax = F.max_pool2d( heat, (ksize, ksize), stride=1, padding=(ksize-1)//2) - keep = (hmax == heat).float() - heat = heat * keep - heat = heat.reshape(-1, ) - - scores, indices = torch.topk(heat, topk_n, dim=-1, largest=True) - yy = torch.floor_divide(indices, w).unsqueeze(-1) - xx = torch.fmod(indices, w).unsqueeze(-1) - ptss = torch.cat((yy, xx),dim=-1) - - ptss = ptss.detach().cpu().numpy() - scores = scores.detach().cpu().numpy() - displacement = displacement.detach().cpu().numpy() - displacement = displacement.transpose((1,2,0)) - return ptss, scores, displacement - - -def pred_lines(image, model, - input_shape=[512, 512], - score_thr=0.10, - dist_thr=20.0): - h, w, _ = image.shape - h_ratio, w_ratio = [h / input_shape[0], w / input_shape[1]] - - resized_image = np.concatenate([cv2.resize(image, (input_shape[1], input_shape[0]), interpolation=cv2.INTER_AREA), - np.ones([input_shape[0], input_shape[1], 1])], axis=-1) - - resized_image = resized_image.transpose((2,0,1)) - batch_image = np.expand_dims(resized_image, axis=0).astype('float32') - batch_image = (batch_image / 127.5) - 1.0 - - batch_image = torch.from_numpy(batch_image).float().cuda() - outputs = model(batch_image) - pts, pts_score, vmap = deccode_output_score_and_ptss(outputs, 200, 3) - start = vmap[:, :, :2] - end = vmap[:, :, 2:] - dist_map = np.sqrt(np.sum((start - end) ** 2, axis=-1)) - - segments_list = [] - for center, score in zip(pts, pts_score): - y, x = center - distance = dist_map[y, x] - if score > score_thr and distance > dist_thr: - disp_x_start, disp_y_start, disp_x_end, disp_y_end = vmap[y, x, :] - x_start = x + disp_x_start - y_start = y + disp_y_start - x_end = x + disp_x_end - y_end = y + disp_y_end - segments_list.append([x_start, y_start, x_end, y_end]) - - lines = 2 * np.array(segments_list) # 256 > 512 - lines[:, 0] = lines[:, 0] * w_ratio - lines[:, 1] = lines[:, 1] * h_ratio - lines[:, 2] = lines[:, 2] * w_ratio - lines[:, 3] = lines[:, 3] * h_ratio - - return lines - - -def pred_squares(image, - model, - input_shape=[512, 512], - params={'score': 0.06, - 'outside_ratio': 0.28, - 'inside_ratio': 0.45, - 'w_overlap': 0.0, - 'w_degree': 1.95, - 'w_length': 0.0, - 'w_area': 1.86, - 'w_center': 0.14}): - ''' - shape = [height, width] - ''' - h, w, _ = image.shape - original_shape = [h, w] - - resized_image = np.concatenate([cv2.resize(image, (input_shape[0], input_shape[1]), interpolation=cv2.INTER_AREA), - np.ones([input_shape[0], input_shape[1], 1])], axis=-1) - resized_image = resized_image.transpose((2, 0, 1)) - batch_image = np.expand_dims(resized_image, axis=0).astype('float32') - batch_image = (batch_image / 127.5) - 1.0 - - batch_image = torch.from_numpy(batch_image).float().cuda() - outputs = model(batch_image) - - pts, pts_score, vmap = deccode_output_score_and_ptss(outputs, 200, 3) - start = vmap[:, :, :2] # (x, y) - end = vmap[:, :, 2:] # (x, y) - dist_map = np.sqrt(np.sum((start - end) ** 2, axis=-1)) - - junc_list = [] - segments_list = [] - for junc, score in zip(pts, pts_score): - y, x = junc - distance = dist_map[y, x] - if score > params['score'] 
and distance > 20.0: - junc_list.append([x, y]) - disp_x_start, disp_y_start, disp_x_end, disp_y_end = vmap[y, x, :] - d_arrow = 1.0 - x_start = x + d_arrow * disp_x_start - y_start = y + d_arrow * disp_y_start - x_end = x + d_arrow * disp_x_end - y_end = y + d_arrow * disp_y_end - segments_list.append([x_start, y_start, x_end, y_end]) - - segments = np.array(segments_list) - - ####### post processing for squares - # 1. get unique lines - point = np.array([[0, 0]]) - point = point[0] - start = segments[:, :2] - end = segments[:, 2:] - diff = start - end - a = diff[:, 1] - b = -diff[:, 0] - c = a * start[:, 0] + b * start[:, 1] - - d = np.abs(a * point[0] + b * point[1] - c) / np.sqrt(a ** 2 + b ** 2 + 1e-10) - theta = np.arctan2(diff[:, 0], diff[:, 1]) * 180 / np.pi - theta[theta < 0.0] += 180 - hough = np.concatenate([d[:, None], theta[:, None]], axis=-1) - - d_quant = 1 - theta_quant = 2 - hough[:, 0] //= d_quant - hough[:, 1] //= theta_quant - _, indices, counts = np.unique(hough, axis=0, return_index=True, return_counts=True) - - acc_map = np.zeros([512 // d_quant + 1, 360 // theta_quant + 1], dtype='float32') - idx_map = np.zeros([512 // d_quant + 1, 360 // theta_quant + 1], dtype='int32') - 1 - yx_indices = hough[indices, :].astype('int32') - acc_map[yx_indices[:, 0], yx_indices[:, 1]] = counts - idx_map[yx_indices[:, 0], yx_indices[:, 1]] = indices - - acc_map_np = acc_map - # acc_map = acc_map[None, :, :, None] - # - # ### fast suppression using tensorflow op - # acc_map = tf.constant(acc_map, dtype=tf.float32) - # max_acc_map = tf.keras.layers.MaxPool2D(pool_size=(5, 5), strides=1, padding='same')(acc_map) - # acc_map = acc_map * tf.cast(tf.math.equal(acc_map, max_acc_map), tf.float32) - # flatten_acc_map = tf.reshape(acc_map, [1, -1]) - # topk_values, topk_indices = tf.math.top_k(flatten_acc_map, k=len(pts)) - # _, h, w, _ = acc_map.shape - # y = tf.expand_dims(topk_indices // w, axis=-1) - # x = tf.expand_dims(topk_indices % w, axis=-1) - # yx = tf.concat([y, x], axis=-1) - - ### fast suppression using pytorch op - acc_map = torch.from_numpy(acc_map_np).unsqueeze(0).unsqueeze(0) - _,_, h, w = acc_map.shape - max_acc_map = F.max_pool2d(acc_map,kernel_size=5, stride=1, padding=2) - acc_map = acc_map * ( (acc_map == max_acc_map).float() ) - flatten_acc_map = acc_map.reshape([-1, ]) - - scores, indices = torch.topk(flatten_acc_map, len(pts), dim=-1, largest=True) - yy = torch.div(indices, w, rounding_mode='floor').unsqueeze(-1) - xx = torch.fmod(indices, w).unsqueeze(-1) - yx = torch.cat((yy, xx), dim=-1) - - yx = yx.detach().cpu().numpy() - - topk_values = scores.detach().cpu().numpy() - indices = idx_map[yx[:, 0], yx[:, 1]] - basis = 5 // 2 - - merged_segments = [] - for yx_pt, max_indice, value in zip(yx, indices, topk_values): - y, x = yx_pt - if max_indice == -1 or value == 0: - continue - segment_list = [] - for y_offset in range(-basis, basis + 1): - for x_offset in range(-basis, basis + 1): - indice = idx_map[y + y_offset, x + x_offset] - cnt = int(acc_map_np[y + y_offset, x + x_offset]) - if indice != -1: - segment_list.append(segments[indice]) - if cnt > 1: - check_cnt = 1 - current_hough = hough[indice] - for new_indice, new_hough in enumerate(hough): - if (current_hough == new_hough).all() and indice != new_indice: - segment_list.append(segments[new_indice]) - check_cnt += 1 - if check_cnt == cnt: - break - group_segments = np.array(segment_list).reshape([-1, 2]) - sorted_group_segments = np.sort(group_segments, axis=0) - x_min, y_min = sorted_group_segments[0, :] - 
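# np.sort above sorts each coordinate column independently, so row 0 / row -1 hold
# per-axis minima and maxima of the grouped endpoints rather than two actual endpoints.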
x_max, y_max = sorted_group_segments[-1, :] - - deg = theta[max_indice] - if deg >= 90: - merged_segments.append([x_min, y_max, x_max, y_min]) - else: - merged_segments.append([x_min, y_min, x_max, y_max]) - - # 2. get intersections - new_segments = np.array(merged_segments) # (x1, y1, x2, y2) - start = new_segments[:, :2] # (x1, y1) - end = new_segments[:, 2:] # (x2, y2) - new_centers = (start + end) / 2.0 - diff = start - end - dist_segments = np.sqrt(np.sum(diff ** 2, axis=-1)) - - # ax + by = c - a = diff[:, 1] - b = -diff[:, 0] - c = a * start[:, 0] + b * start[:, 1] - pre_det = a[:, None] * b[None, :] - det = pre_det - np.transpose(pre_det) - - pre_inter_y = a[:, None] * c[None, :] - inter_y = (pre_inter_y - np.transpose(pre_inter_y)) / (det + 1e-10) - pre_inter_x = c[:, None] * b[None, :] - inter_x = (pre_inter_x - np.transpose(pre_inter_x)) / (det + 1e-10) - inter_pts = np.concatenate([inter_x[:, :, None], inter_y[:, :, None]], axis=-1).astype('int32') - - # 3. get corner information - # 3.1 get distance - ''' - dist_segments: - | dist(0), dist(1), dist(2), ...| - dist_inter_to_segment1: - | dist(inter,0), dist(inter,0), dist(inter,0), ... | - | dist(inter,1), dist(inter,1), dist(inter,1), ... | - ... - dist_inter_to_semgnet2: - | dist(inter,0), dist(inter,1), dist(inter,2), ... | - | dist(inter,0), dist(inter,1), dist(inter,2), ... | - ... - ''' - - dist_inter_to_segment1_start = np.sqrt( - np.sum(((inter_pts - start[:, None, :]) ** 2), axis=-1, keepdims=True)) # [n_batch, n_batch, 1] - dist_inter_to_segment1_end = np.sqrt( - np.sum(((inter_pts - end[:, None, :]) ** 2), axis=-1, keepdims=True)) # [n_batch, n_batch, 1] - dist_inter_to_segment2_start = np.sqrt( - np.sum(((inter_pts - start[None, :, :]) ** 2), axis=-1, keepdims=True)) # [n_batch, n_batch, 1] - dist_inter_to_segment2_end = np.sqrt( - np.sum(((inter_pts - end[None, :, :]) ** 2), axis=-1, keepdims=True)) # [n_batch, n_batch, 1] - - # sort ascending - dist_inter_to_segment1 = np.sort( - np.concatenate([dist_inter_to_segment1_start, dist_inter_to_segment1_end], axis=-1), - axis=-1) # [n_batch, n_batch, 2] - dist_inter_to_segment2 = np.sort( - np.concatenate([dist_inter_to_segment2_start, dist_inter_to_segment2_end], axis=-1), - axis=-1) # [n_batch, n_batch, 2] - - # 3.2 get degree - inter_to_start = new_centers[:, None, :] - inter_pts - deg_inter_to_start = np.arctan2(inter_to_start[:, :, 1], inter_to_start[:, :, 0]) * 180 / np.pi - deg_inter_to_start[deg_inter_to_start < 0.0] += 360 - inter_to_end = new_centers[None, :, :] - inter_pts - deg_inter_to_end = np.arctan2(inter_to_end[:, :, 1], inter_to_end[:, :, 0]) * 180 / np.pi - deg_inter_to_end[deg_inter_to_end < 0.0] += 360 - - ''' - B -- G - | | - C -- R - B : blue / G: green / C: cyan / R: red - - 0 -- 1 - | | - 3 -- 2 - ''' - # rename variables - deg1_map, deg2_map = deg_inter_to_start, deg_inter_to_end - # sort deg ascending - deg_sort = np.sort(np.concatenate([deg1_map[:, :, None], deg2_map[:, :, None]], axis=-1), axis=-1) - - deg_diff_map = np.abs(deg1_map - deg2_map) - # we only consider the smallest degree of intersect - deg_diff_map[deg_diff_map > 180] = 360 - deg_diff_map[deg_diff_map > 180] - - # define available degree range - deg_range = [60, 120] - - corner_dict = {corner_info: [] for corner_info in range(4)} - inter_points = [] - for i in range(inter_pts.shape[0]): - for j in range(i + 1, inter_pts.shape[1]): - # i, j > line index, always i < j - x, y = inter_pts[i, j, :] - deg1, deg2 = deg_sort[i, j, :] - deg_diff = deg_diff_map[i, j] - - check_degree = 
deg_diff > deg_range[0] and deg_diff < deg_range[1] - - outside_ratio = params['outside_ratio'] # over ratio >>> drop it! - inside_ratio = params['inside_ratio'] # over ratio >>> drop it! - check_distance = ((dist_inter_to_segment1[i, j, 1] >= dist_segments[i] and \ - dist_inter_to_segment1[i, j, 0] <= dist_segments[i] * outside_ratio) or \ - (dist_inter_to_segment1[i, j, 1] <= dist_segments[i] and \ - dist_inter_to_segment1[i, j, 0] <= dist_segments[i] * inside_ratio)) and \ - ((dist_inter_to_segment2[i, j, 1] >= dist_segments[j] and \ - dist_inter_to_segment2[i, j, 0] <= dist_segments[j] * outside_ratio) or \ - (dist_inter_to_segment2[i, j, 1] <= dist_segments[j] and \ - dist_inter_to_segment2[i, j, 0] <= dist_segments[j] * inside_ratio)) - - if check_degree and check_distance: - corner_info = None - - if (deg1 >= 0 and deg1 <= 45 and deg2 >= 45 and deg2 <= 120) or \ - (deg2 >= 315 and deg1 >= 45 and deg1 <= 120): - corner_info, color_info = 0, 'blue' - elif (deg1 >= 45 and deg1 <= 125 and deg2 >= 125 and deg2 <= 225): - corner_info, color_info = 1, 'green' - elif (deg1 >= 125 and deg1 <= 225 and deg2 >= 225 and deg2 <= 315): - corner_info, color_info = 2, 'black' - elif (deg1 >= 0 and deg1 <= 45 and deg2 >= 225 and deg2 <= 315) or \ - (deg2 >= 315 and deg1 >= 225 and deg1 <= 315): - corner_info, color_info = 3, 'cyan' - else: - corner_info, color_info = 4, 'red' # we don't use it - continue - - corner_dict[corner_info].append([x, y, i, j]) - inter_points.append([x, y]) - - square_list = [] - connect_list = [] - segments_list = [] - for corner0 in corner_dict[0]: - for corner1 in corner_dict[1]: - connect01 = False - for corner0_line in corner0[2:]: - if corner0_line in corner1[2:]: - connect01 = True - break - if connect01: - for corner2 in corner_dict[2]: - connect12 = False - for corner1_line in corner1[2:]: - if corner1_line in corner2[2:]: - connect12 = True - break - if connect12: - for corner3 in corner_dict[3]: - connect23 = False - for corner2_line in corner2[2:]: - if corner2_line in corner3[2:]: - connect23 = True - break - if connect23: - for corner3_line in corner3[2:]: - if corner3_line in corner0[2:]: - # SQUARE!!! - ''' - 0 -- 1 - | | - 3 -- 2 - square_list: - order: 0 > 1 > 2 > 3 - | x0, y0, x1, y1, x2, y2, x3, y3 | - | x0, y0, x1, y1, x2, y2, x3, y3 | - ... - connect_list: - order: 01 > 12 > 23 > 30 - | line_idx01, line_idx12, line_idx23, line_idx30 | - | line_idx01, line_idx12, line_idx23, line_idx30 | - ... - segments_list: - order: 0 > 1 > 2 > 3 - | line_idx0_i, line_idx0_j, line_idx1_i, line_idx1_j, line_idx2_i, line_idx2_j, line_idx3_i, line_idx3_j | - | line_idx0_i, line_idx0_j, line_idx1_i, line_idx1_j, line_idx2_i, line_idx2_j, line_idx3_i, line_idx3_j | - ... 
- ''' - square_list.append(corner0[:2] + corner1[:2] + corner2[:2] + corner3[:2]) - connect_list.append([corner0_line, corner1_line, corner2_line, corner3_line]) - segments_list.append(corner0[2:] + corner1[2:] + corner2[2:] + corner3[2:]) - - def check_outside_inside(segments_info, connect_idx): - # return 'outside or inside', min distance, cover_param, peri_param - if connect_idx == segments_info[0]: - check_dist_mat = dist_inter_to_segment1 - else: - check_dist_mat = dist_inter_to_segment2 - - i, j = segments_info - min_dist, max_dist = check_dist_mat[i, j, :] - connect_dist = dist_segments[connect_idx] - if max_dist > connect_dist: - return 'outside', min_dist, 0, 1 - else: - return 'inside', min_dist, -1, -1 - - top_square = None - - try: - map_size = input_shape[0] / 2 - squares = np.array(square_list).reshape([-1, 4, 2]) - score_array = [] - connect_array = np.array(connect_list) - segments_array = np.array(segments_list).reshape([-1, 4, 2]) - - # get degree of corners: - squares_rollup = np.roll(squares, 1, axis=1) - squares_rolldown = np.roll(squares, -1, axis=1) - vec1 = squares_rollup - squares - normalized_vec1 = vec1 / (np.linalg.norm(vec1, axis=-1, keepdims=True) + 1e-10) - vec2 = squares_rolldown - squares - normalized_vec2 = vec2 / (np.linalg.norm(vec2, axis=-1, keepdims=True) + 1e-10) - inner_products = np.sum(normalized_vec1 * normalized_vec2, axis=-1) # [n_squares, 4] - squares_degree = np.arccos(inner_products) * 180 / np.pi # [n_squares, 4] - - # get square score - overlap_scores = [] - degree_scores = [] - length_scores = [] - - for connects, segments, square, degree in zip(connect_array, segments_array, squares, squares_degree): - ''' - 0 -- 1 - | | - 3 -- 2 - - # segments: [4, 2] - # connects: [4] - ''' - - ###################################### OVERLAP SCORES - cover = 0 - perimeter = 0 - # check 0 > 1 > 2 > 3 - square_length = [] - - for start_idx in range(4): - end_idx = (start_idx + 1) % 4 - - connect_idx = connects[start_idx] # segment idx of segment01 - start_segments = segments[start_idx] - end_segments = segments[end_idx] - - start_point = square[start_idx] - end_point = square[end_idx] - - # check whether outside or inside - start_position, start_min, start_cover_param, start_peri_param = check_outside_inside(start_segments, - connect_idx) - end_position, end_min, end_cover_param, end_peri_param = check_outside_inside(end_segments, connect_idx) - - cover += dist_segments[connect_idx] + start_cover_param * start_min + end_cover_param * end_min - perimeter += dist_segments[connect_idx] + start_peri_param * start_min + end_peri_param * end_min - - square_length.append( - dist_segments[connect_idx] + start_peri_param * start_min + end_peri_param * end_min) - - overlap_scores.append(cover / perimeter) - ###################################### - ###################################### DEGREE SCORES - ''' - deg0 vs deg2 - deg1 vs deg3 - ''' - deg0, deg1, deg2, deg3 = degree - deg_ratio1 = deg0 / deg2 - if deg_ratio1 > 1.0: - deg_ratio1 = 1 / deg_ratio1 - deg_ratio2 = deg1 / deg3 - if deg_ratio2 > 1.0: - deg_ratio2 = 1 / deg_ratio2 - degree_scores.append((deg_ratio1 + deg_ratio2) / 2) - ###################################### - ###################################### LENGTH SCORES - ''' - len0 vs len2 - len1 vs len3 - ''' - len0, len1, len2, len3 = square_length - len_ratio1 = len0 / len2 if len2 > len0 else len2 / len0 - len_ratio2 = len1 / len3 if len3 > len1 else len3 / len1 - length_scores.append((len_ratio1 + len_ratio2) / 2) - - 
###################################### - - overlap_scores = np.array(overlap_scores) - overlap_scores /= np.max(overlap_scores) - - degree_scores = np.array(degree_scores) - # degree_scores /= np.max(degree_scores) - - length_scores = np.array(length_scores) - - ###################################### AREA SCORES - area_scores = np.reshape(squares, [-1, 4, 2]) - area_x = area_scores[:, :, 0] - area_y = area_scores[:, :, 1] - correction = area_x[:, -1] * area_y[:, 0] - area_y[:, -1] * area_x[:, 0] - area_scores = np.sum(area_x[:, :-1] * area_y[:, 1:], axis=-1) - np.sum(area_y[:, :-1] * area_x[:, 1:], axis=-1) - area_scores = 0.5 * np.abs(area_scores + correction) - area_scores /= (map_size * map_size) # np.max(area_scores) - ###################################### - - ###################################### CENTER SCORES - centers = np.array([[256 // 2, 256 // 2]], dtype='float32') # [1, 2] - # squares: [n, 4, 2] - square_centers = np.mean(squares, axis=1) # [n, 2] - center2center = np.sqrt(np.sum((centers - square_centers) ** 2)) - center_scores = center2center / (map_size / np.sqrt(2.0)) - - ''' - score_w = [overlap, degree, area, center, length] - ''' - score_w = [0.0, 1.0, 10.0, 0.5, 1.0] - score_array = params['w_overlap'] * overlap_scores \ - + params['w_degree'] * degree_scores \ - + params['w_area'] * area_scores \ - - params['w_center'] * center_scores \ - + params['w_length'] * length_scores - - best_square = [] - - sorted_idx = np.argsort(score_array)[::-1] - score_array = score_array[sorted_idx] - squares = squares[sorted_idx] - - except Exception as e: - pass - - '''return list - merged_lines, squares, scores - ''' - - try: - new_segments[:, 0] = new_segments[:, 0] * 2 / input_shape[1] * original_shape[1] - new_segments[:, 1] = new_segments[:, 1] * 2 / input_shape[0] * original_shape[0] - new_segments[:, 2] = new_segments[:, 2] * 2 / input_shape[1] * original_shape[1] - new_segments[:, 3] = new_segments[:, 3] * 2 / input_shape[0] * original_shape[0] - except: - new_segments = [] - - try: - squares[:, :, 0] = squares[:, :, 0] * 2 / input_shape[1] * original_shape[1] - squares[:, :, 1] = squares[:, :, 1] * 2 / input_shape[0] * original_shape[0] - except: - squares = [] - score_array = [] - - try: - inter_points = np.array(inter_points) - inter_points[:, 0] = inter_points[:, 0] * 2 / input_shape[1] * original_shape[1] - inter_points[:, 1] = inter_points[:, 1] * 2 / input_shape[0] * original_shape[0] - except: - inter_points = [] - - return new_segments, squares, score_array, inter_points diff --git a/spaces/glyszt/vt/vtoonify/model/stylegan/distributed.py b/spaces/glyszt/vt/vtoonify/model/stylegan/distributed.py deleted file mode 100644 index 51fa243257ef302e2015d5ff36ac531b86a9a0ce..0000000000000000000000000000000000000000 --- a/spaces/glyszt/vt/vtoonify/model/stylegan/distributed.py +++ /dev/null @@ -1,126 +0,0 @@ -import math -import pickle - -import torch -from torch import distributed as dist -from torch.utils.data.sampler import Sampler - - -def get_rank(): - if not dist.is_available(): - return 0 - - if not dist.is_initialized(): - return 0 - - return dist.get_rank() - - -def synchronize(): - if not dist.is_available(): - return - - if not dist.is_initialized(): - return - - world_size = dist.get_world_size() - - if world_size == 1: - return - - dist.barrier() - - -def get_world_size(): - if not dist.is_available(): - return 1 - - if not dist.is_initialized(): - return 1 - - return dist.get_world_size() - - -def reduce_sum(tensor): - if not dist.is_available(): - return tensor 
- - if not dist.is_initialized(): - return tensor - - tensor = tensor.clone() - dist.all_reduce(tensor, op=dist.ReduceOp.SUM) - - return tensor - - -def gather_grad(params): - world_size = get_world_size() - - if world_size == 1: - return - - for param in params: - if param.grad is not None: - dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM) - param.grad.data.div_(world_size) - - -def all_gather(data): - world_size = get_world_size() - - if world_size == 1: - return [data] - - buffer = pickle.dumps(data) - storage = torch.ByteStorage.from_buffer(buffer) - tensor = torch.ByteTensor(storage).to('cuda') - - local_size = torch.IntTensor([tensor.numel()]).to('cuda') - size_list = [torch.IntTensor([0]).to('cuda') for _ in range(world_size)] - dist.all_gather(size_list, local_size) - size_list = [int(size.item()) for size in size_list] - max_size = max(size_list) - - tensor_list = [] - for _ in size_list: - tensor_list.append(torch.ByteTensor(size=(max_size,)).to('cuda')) - - if local_size != max_size: - padding = torch.ByteTensor(size=(max_size - local_size,)).to('cuda') - tensor = torch.cat((tensor, padding), 0) - - dist.all_gather(tensor_list, tensor) - - data_list = [] - - for size, tensor in zip(size_list, tensor_list): - buffer = tensor.cpu().numpy().tobytes()[:size] - data_list.append(pickle.loads(buffer)) - - return data_list - - -def reduce_loss_dict(loss_dict): - world_size = get_world_size() - - if world_size < 2: - return loss_dict - - with torch.no_grad(): - keys = [] - losses = [] - - for k in sorted(loss_dict.keys()): - keys.append(k) - losses.append(loss_dict[k]) - - losses = torch.stack(losses, 0) - dist.reduce(losses, dst=0) - - if dist.get_rank() == 0: - losses /= world_size - - reduced_losses = {k: v for k, v in zip(keys, losses)} - - return reduced_losses diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Grandeu Item Editor A Must-Have Mod for Dungeon Defenders Lovers.md b/spaces/gotiQspiryo/whisper-ui/examples/Grandeu Item Editor A Must-Have Mod for Dungeon Defenders Lovers.md deleted file mode 100644 index 2fae00e4184bc2fe37f8619b2a1347bb936b96dd..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Grandeu Item Editor A Must-Have Mod for Dungeon Defenders Lovers.md +++ /dev/null @@ -1,8 +0,0 @@ -<br /> -<p>The export option will allow you to export the current search results of the entered query to a file. Differen formats are available for download. To export the items, click on the button corresponding with the preferred download format.</p> -<p>To select a subset of the search results, click "Selective Export" button and make a selection of the items you want to export. The amount of items that can be exported at once is similarly restricted as the full export.</p> -<h2>Grandeu Item Editor</h2><br /><p><b><b>DOWNLOAD</b> ✫ <a href="https://urlgoal.com/2uyMEv">https://urlgoal.com/2uyMEv</a></b></p><br /><br /> -<p>"Auteurism, Part 1": Tired of what he perceives to be inadequate media coverage, the Joker sets out to make an autobiographical movie, with golden age filmmaker and comic Buddy Kantor starring as him. Kantor, a washed-up alcoholic who suffers from constant hallucinations and delusions of grandeu</p> -<p>Tired of what he perceives to be inadequate media coverage, the Joker sets out to make an autobiographical movie, with golden age filmmaker and comic Buddy Kantor starring as him. 
Kantor, a washed-up alcoholic who suffers from constant hallucinations and delusions of grandeur, is at first reluctant to participate, but quickly loses himself in the role when the Joker begins to feed his ego.</p> aaccfb2cb3<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Klavir Za Pocetnike.pdf.md b/spaces/gotiQspiryo/whisper-ui/examples/Klavir Za Pocetnike.pdf.md deleted file mode 100644 index 3283241165f32554627d09f2ea820996be89f662..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Klavir Za Pocetnike.pdf.md +++ /dev/null @@ -1,62 +0,0 @@ -<h2>Klavir Za Pocetnike.pdf</h2><br /><p><b><b>DOWNLOAD</b> 🆗 <a href="https://urlgoal.com/2uyNfi">https://urlgoal.com/2uyNfi</a></b></p><br /><br /> -<br /> -While the appellate court was very cautious in its ruling, it did hold that - -a student has a right to maintain a First Amendment claim against a public - -school. That has implications for the thousands of school boards across - -the country that ban heavy metal and punk rock. - -[ - -Everything-A...]( - -Adults/dp/0964810001/ref=sr_1_2?ie=UTF8&qid=1536776410&sr=8-2&keywords=punk) - -~~~ - -janvidas - -Or metal in general? This is not what the German schools are teaching their - -students: - -verbotene...]( - -Gewalt-in-der-Schule-In-Deutschland-werden-auch-noch-mehr-kinder-getötet.html) - ->Dass die Deutschen besonders rau geworden sind, dürfte eben nur der - -Schlusskurs sein. - ->Zudem wurde durch eine ganze Reihe von Studien bestätigt, dass unsere - -Kinder jetzt sogar besonders menschenfreundlich sind. - -skrebbel - -The article is not that Germany is the only place that bans heavy metal, but - -that Germany is the only place that bans heavy metal _in schools_. It's - -ironic that the people in Germany that want it to be forbidden are, in their - -extreme right, in favor of killing more children in schools, but many, many - -countries, including those that traditionally fall within the "liberal - -democratic left" or whatever, banned it from schools without and across the - -border well before Germany. I guess kids aren't the only ones that like to - -play with fire. 
- -TheOtherHobbes - -The problem with banning metal or any other genre of music, as opposed to - -just 4fefd39f24<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/gyrojeff/YuzuMarker.FontDetection/font_dataset/text.py b/spaces/gyrojeff/YuzuMarker.FontDetection/font_dataset/text.py deleted file mode 100644 index 2731ea47e1a212309fc18f711df1243f2d14534f..0000000000000000000000000000000000000000 --- a/spaces/gyrojeff/YuzuMarker.FontDetection/font_dataset/text.py +++ /dev/null @@ -1,227 +0,0 @@ -import os -import random -import requests -from .font import DSFont -from .helper import char_in_font - -__all__ = [ - "random_char", - "UnqualifiedFontException", - "CorpusGenerationConfig", - "CorpusGeneratorManager", -] - -# https://zh.wikipedia.org/zh-hans/%E5%B9%B3%E5%81%87%E5%90%8D -hiragana = ( - "ぁあぃいぅうぇえぉおかがきぎくぐけげこごさざしじすずせぜそぞただちぢっつづてでとどなにぬねのはばぱひびぴふぶぷへべぺほぼぽまみむめもゃやゅゆょよらりるれろゎわをん" -) - -# https://zh.wikipedia.org/zh-hans/%E7%89%87%E5%81%87%E5%90%8D -katakana = "ァアィイゥウェエォオカガキギクグケゲコゴサザシジスズセゼソゾタダチヂッツヅテデトドナニヌネノハバパヒビピフブプヘベペホボポマミムメモャヤュユョヨラリルレロヮワヲンヵヶ" - -# https://ja.wiktionary.org/wiki/%E4%BB%98%E9%8C%B2:%E5%B8%B8%E7%94%A8%E6%BC%A2%E5%AD%97%E3%81%AE%E4%B8%80%E8%A6%A7 -common_kanji = "亜哀挨愛曖悪握圧扱宛嵐安案暗以衣位囲医依委威為畏胃尉異移萎偉椅彙意違維慰遺緯域育一壱逸茨芋引印因咽姻員院淫陰飲隠韻右宇羽雨唄鬱畝浦運雲永泳英映栄営詠影鋭衛易疫益液駅悦越謁閲円延沿炎怨宴媛援園煙猿遠鉛塩演縁艶汚王凹央応往押旺欧殴桜翁奥横岡屋億憶臆虞乙俺卸音恩温穏下化火加可仮何花佳価果河苛科架夏家荷華菓貨渦過嫁暇禍靴寡歌箇稼課蚊牙瓦我画芽賀雅餓介回灰会快戒改怪拐悔海界皆械絵開階塊楷解潰壊懐諧貝外劾害崖涯街慨蓋該概骸垣柿各角拡革格核殻郭覚較隔閣確獲嚇穫学岳楽額顎掛潟括活喝渇割葛滑褐轄且株釜鎌刈干刊甘汗缶完肝官冠巻看陥乾勘患貫寒喚堪換敢棺款間閑勧寛幹感漢慣管関歓監緩憾還館環簡観韓艦鑑丸含岸岩玩眼頑顔願企伎危机気岐希忌汽奇祈季紀軌既記起飢鬼帰基寄規亀喜幾揮期棋貴棄毀旗器畿輝機騎技宜偽欺義疑儀戯擬犠議菊吉喫詰却客脚逆虐九久及弓丘旧休吸朽臼求究泣急級糾宮救球給嗅窮牛去巨居拒拠挙虚許距魚御漁凶共叫狂京享供協況峡挟狭恐恭胸脅強教郷境橋矯鏡競響驚仰暁業凝曲局極玉巾斤均近金菌勤琴筋僅禁緊錦謹襟吟銀区句苦駆具惧愚空偶遇隅串屈掘窟熊繰君訓勲薫軍郡群兄刑形系径茎係型契計恵啓掲渓経蛍敬景軽傾携継詣慶憬稽憩警鶏芸迎鯨隙劇撃激桁欠穴血決結傑潔月犬件見券肩建研県倹兼剣拳軒健険圏堅検嫌献絹遣権憲賢謙鍵繭顕験懸元幻玄言弦限原現舷減源厳己戸古呼固股虎孤弧故枯個庫湖雇誇鼓錮顧五互午呉後娯悟碁語誤護口工公勾孔功巧広甲交光向后好江考行坑孝抗攻更効幸拘肯侯厚恒洪皇紅荒郊香候校耕航貢降高康控梗黄喉慌港硬絞項溝鉱構綱酵稿興衡鋼講購乞号合拷剛傲豪克告谷刻国黒穀酷獄骨駒込頃今困昆恨根婚混痕紺魂墾懇左佐沙査砂唆差詐鎖座挫才再災妻采砕宰栽彩採済祭斎細菜最裁債催塞歳載際埼在材剤財罪崎作削昨柵索策酢搾錯咲冊札刷刹拶殺察撮擦雑皿三山参桟蚕惨産傘散算酸賛残斬暫士子支止氏仕史司四市矢旨死糸至伺志私使刺始姉枝祉肢姿思指施師恣紙脂視紫詞歯嗣試詩資飼誌雌摯賜諮示字寺次耳自似児事侍治持時滋慈辞磁餌璽鹿式識軸七叱失室疾執湿嫉漆質実芝写社車舎者射捨赦斜煮遮謝邪蛇尺借酌釈爵若弱寂手主守朱取狩首殊珠酒腫種趣寿受呪授需儒樹収囚州舟秀周宗拾秋臭修袖終羞習週就衆集愁酬醜蹴襲十汁充住柔重従渋銃獣縦叔祝宿淑粛縮塾熟出述術俊春瞬旬巡盾准殉純循順準潤遵処初所書庶暑署緒諸女如助序叙徐除小升少召匠床抄肖尚招承昇松沼昭宵将消症祥称笑唱商渉章紹訟勝掌晶焼焦硝粧詔証象傷奨照詳彰障憧衝賞償礁鐘上丈冗条状乗城浄剰常情場畳蒸縄壌嬢錠譲醸色拭食植殖飾触嘱織職辱尻心申伸臣芯身辛侵信津神唇娠振浸真針深紳進森診寝慎新審震薪親人刃仁尽迅甚陣尋腎須図水吹垂炊帥粋衰推酔遂睡穂随髄枢崇数据杉裾寸瀬是井世正生成西声制姓征性青斉政星牲省凄逝清盛婿晴勢聖誠精製誓静請整醒税夕斥石赤昔析席脊隻惜戚責跡積績籍切折拙窃接設雪摂節説舌絶千川仙占先宣専泉浅洗染扇栓旋船戦煎羨腺詮践箋銭潜線遷選薦繊鮮全前善然禅漸膳繕狙阻祖租素措粗組疎訴塑遡礎双壮早争走奏相荘草送倉捜挿桑巣掃曹曽爽窓創喪痩葬装僧想層総遭槽踪操燥霜騒藻造像増憎蔵贈臓即束足促則息捉速側測俗族属賊続卒率存村孫尊損遜他多汰打妥唾堕惰駄太対体耐待怠胎退帯泰堆袋逮替貸隊滞態戴大代台第題滝宅択沢卓拓託濯諾濁但達脱奪棚誰丹旦担単炭胆探淡短嘆端綻誕鍛団男段断弾暖談壇地池知値恥致遅痴稚置緻竹畜逐蓄築秩窒茶着嫡中仲虫沖宙忠抽注昼柱衷酎鋳駐著貯丁弔庁兆町長挑帳張彫眺釣頂鳥朝貼超腸跳徴嘲潮澄調聴懲直勅捗沈珍朕陳賃鎮追椎墜通痛塚漬坪爪鶴低呈廷弟定底抵邸亭貞帝訂庭逓停偵堤提程艇締諦泥的笛摘滴適敵溺迭哲鉄徹撤天典店点展添転填田伝殿電斗吐妬徒途都渡塗賭土奴努度怒刀冬灯当投豆東到逃倒凍唐島桃討透党悼盗陶塔搭棟湯痘登答等筒統稲踏糖頭謄藤闘騰同洞胴動堂童道働銅導瞳峠匿特得督徳篤毒独読栃凸突届屯豚頓貪鈍曇丼那奈内梨謎鍋南軟難二尼弐匂肉虹日入乳尿任妊忍認寧熱年念捻粘燃悩納能脳農濃把波派破覇馬婆罵拝杯背肺俳配排敗廃輩売倍梅培陪媒買賠白伯拍泊迫剥舶博薄麦漠縛爆箱箸畑肌八鉢発髪伐抜罰閥反半氾犯帆汎伴判坂阪板版班畔般販斑飯搬煩頒範繁藩晩番蛮盤比皮妃否批彼披肥非卑飛疲秘被悲扉費碑罷避尾眉美備微鼻膝肘匹必泌筆姫百氷表俵票評漂標苗秒病描猫品浜貧賓頻敏瓶不夫父付布扶府怖阜附訃負赴浮婦符富普腐敷膚賦譜侮武部舞封風伏服副幅復福腹複覆払沸仏物粉紛雰噴墳憤奮分文聞丙平兵併並柄陛閉塀幣弊蔽餅米壁璧癖別蔑片辺返変偏遍編弁便勉歩保哺捕補舗母募墓慕暮簿方包芳邦奉宝抱放法泡胞俸倣峰砲崩訪報蜂豊飽褒縫亡乏忙坊妨忘防房肪某冒剖紡望傍帽棒貿貌暴膨謀頬北木朴牧睦僕墨撲没勃堀本奔翻凡盆麻摩磨魔毎妹枚昧埋幕膜枕又末抹万満慢漫未味魅岬密蜜脈妙民眠矛務無夢霧娘名命明迷冥盟銘鳴滅免面綿麺茂模毛妄盲耗猛網目黙門紋問冶夜野弥厄役約訳薬躍闇由油喩愉諭輸癒唯友有勇幽悠郵湧猶裕遊雄誘憂融優与予余誉預幼用羊妖洋要容庸揚揺葉陽溶腰様瘍踊窯養擁謡曜抑沃浴欲翌翼拉裸羅来雷頼絡落酪辣乱卵覧濫藍欄吏利里理痢裏履璃離陸立律慄略柳流留竜粒隆硫侶旅虜慮了両良料涼猟陵量僚領寮療瞭糧力緑林厘倫輪隣臨瑠涙累塁類令礼冷励戻例鈴零霊隷齢麗暦歴列劣烈裂恋連廉練錬呂炉賂路露老労弄郎朗浪廊楼漏籠六録麓論和話賄脇惑枠湾腕" - -# https://gist.github.com/simongfxu/13accd501f6c91e7a423ddc43e674c0f -common_simplified_chinese = 
"一乙二十丁厂七卜人入八九几儿了力乃刀又三于干亏士工土才寸下大丈与万上小口巾山千乞川亿个勺久凡及夕丸么广亡门义之尸弓己已子卫也女飞刃习叉马乡丰王井开夫天无元专云扎艺木五支厅不太犬区历尤友匹车巨牙屯比互切瓦止少日中冈贝内水见午牛手毛气升长仁什片仆化仇币仍仅斤爪反介父从今凶分乏公仓月氏勿欠风丹匀乌凤勾文六方火为斗忆订计户认心尺引丑巴孔队办以允予劝双书幻玉刊示末未击打巧正扑扒功扔去甘世古节本术可丙左厉右石布龙平灭轧东卡北占业旧帅归且旦目叶甲申叮电号田由史只央兄叼叫另叨叹四生失禾丘付仗代仙们仪白仔他斥瓜乎丛令用甩印乐句匆册犯外处冬鸟务包饥主市立闪兰半汁汇头汉宁穴它讨写让礼训必议讯记永司尼民出辽奶奴加召皮边发孕圣对台矛纠母幼丝式刑动扛寺吉扣考托老执巩圾扩扫地扬场耳共芒亚芝朽朴机权过臣再协西压厌在有百存而页匠夸夺灰达列死成夹轨邪划迈毕至此贞师尘尖劣光当早吐吓虫曲团同吊吃因吸吗屿帆岁回岂刚则肉网年朱先丢舌竹迁乔伟传乒乓休伍伏优伐延件任伤价份华仰仿伙伪自血向似后行舟全会杀合兆企众爷伞创肌朵杂危旬旨负各名多争色壮冲冰庄庆亦刘齐交次衣产决充妄闭问闯羊并关米灯州汗污江池汤忙兴宇守宅字安讲军许论农讽设访寻那迅尽导异孙阵阳收阶阴防奸如妇好她妈戏羽观欢买红纤级约纪驰巡寿弄麦形进戒吞远违运扶抚坛技坏扰拒找批扯址走抄坝贡攻赤折抓扮抢孝均抛投坟抗坑坊抖护壳志扭块声把报却劫芽花芹芬苍芳严芦劳克苏杆杠杜材村杏极李杨求更束豆两丽医辰励否还歼来连步坚旱盯呈时吴助县里呆园旷围呀吨足邮男困吵串员听吩吹呜吧吼别岗帐财针钉告我乱利秃秀私每兵估体何但伸作伯伶佣低你住位伴身皂佛近彻役返余希坐谷妥含邻岔肝肚肠龟免狂犹角删条卵岛迎饭饮系言冻状亩况床库疗应冷这序辛弃冶忘闲间闷判灶灿弟汪沙汽沃泛沟没沈沉怀忧快完宋宏牢究穷灾良证启评补初社识诉诊词译君灵即层尿尾迟局改张忌际陆阿陈阻附妙妖妨努忍劲鸡驱纯纱纳纲驳纵纷纸纹纺驴纽奉玩环武青责现表规抹拢拔拣担坦押抽拐拖拍者顶拆拥抵拘势抱垃拉拦拌幸招坡披拨择抬其取苦若茂苹苗英范直茄茎茅林枝杯柜析板松枪构杰述枕丧或画卧事刺枣雨卖矿码厕奔奇奋态欧垄妻轰顷转斩轮软到非叔肯齿些虎虏肾贤尚旺具果味昆国昌畅明易昂典固忠咐呼鸣咏呢岸岩帖罗帜岭凯败贩购图钓制知垂牧物乖刮秆和季委佳侍供使例版侄侦侧凭侨佩货依的迫质欣征往爬彼径所舍金命斧爸采受乳贪念贫肤肺肢肿胀朋股肥服胁周昏鱼兔狐忽狗备饰饱饲变京享店夜庙府底剂郊废净盲放刻育闸闹郑券卷单炒炊炕炎炉沫浅法泄河沾泪油泊沿泡注泻泳泥沸波泼泽治怖性怕怜怪学宝宗定宜审宙官空帘实试郎诗肩房诚衬衫视话诞询该详建肃录隶居届刷屈弦承孟孤陕降限妹姑姐姓始驾参艰线练组细驶织终驻驼绍经贯奏春帮珍玻毒型挂封持项垮挎城挠政赴赵挡挺括拴拾挑指垫挣挤拼挖按挥挪某甚革荐巷带草茧茶荒茫荡荣故胡南药标枯柄栋相查柏柳柱柿栏树要咸威歪研砖厘厚砌砍面耐耍牵残殃轻鸦皆背战点临览竖省削尝是盼眨哄显哑冒映星昨畏趴胃贵界虹虾蚁思蚂虽品咽骂哗咱响哈咬咳哪炭峡罚贱贴骨钞钟钢钥钩卸缸拜看矩怎牲选适秒香种秋科重复竿段便俩贷顺修保促侮俭俗俘信皇泉鬼侵追俊盾待律很须叙剑逃食盆胆胜胞胖脉勉狭狮独狡狱狠贸怨急饶蚀饺饼弯将奖哀亭亮度迹庭疮疯疫疤姿亲音帝施闻阀阁差养美姜叛送类迷前首逆总炼炸炮烂剃洁洪洒浇浊洞测洗活派洽染济洋洲浑浓津恒恢恰恼恨举觉宣室宫宪突穿窃客冠语扁袄祖神祝误诱说诵垦退既屋昼费陡眉孩除险院娃姥姨姻娇怒架贺盈勇怠柔垒绑绒结绕骄绘给络骆绝绞统耕耗艳泰珠班素蚕顽盏匪捞栽捕振载赶起盐捎捏埋捉捆捐损都哲逝捡换挽热恐壶挨耻耽恭莲莫荷获晋恶真框桂档桐株桥桃格校核样根索哥速逗栗配翅辱唇夏础破原套逐烈殊顾轿较顿毙致柴桌虑监紧党晒眠晓鸭晃晌晕蚊哨哭恩唤啊唉罢峰圆贼贿钱钳钻铁铃铅缺氧特牺造乘敌秤租积秧秩称秘透笔笑笋债借值倚倾倒倘俱倡候俯倍倦健臭射躬息徒徐舰舱般航途拿爹爱颂翁脆脂胸胳脏胶脑狸狼逢留皱饿恋桨浆衰高席准座脊症病疾疼疲效离唐资凉站剖竞部旁旅畜阅羞瓶拳粉料益兼烤烘烦烧烛烟递涛浙涝酒涉消浩海涂浴浮流润浪浸涨烫涌悟悄悔悦害宽家宵宴宾窄容宰案请朗诸读扇袜袖袍被祥课谁调冤谅谈谊剥恳展剧屑弱陵陶陷陪娱娘通能难预桑绢绣验继球理捧堵描域掩捷排掉堆推掀授教掏掠培接控探据掘职基著勒黄萌萝菌菜萄菊萍菠营械梦梢梅检梳梯桶救副票戚爽聋袭盛雪辅辆虚雀堂常匙晨睁眯眼悬野啦晚啄距跃略蛇累唱患唯崖崭崇圈铜铲银甜梨犁移笨笼笛符第敏做袋悠偿偶偷您售停偏假得衔盘船斜盒鸽悉欲彩领脚脖脸脱象够猜猪猎猫猛馅馆凑减毫麻痒痕廊康庸鹿盗章竟商族旋望率着盖粘粗粒断剪兽清添淋淹渠渐混渔淘液淡深婆梁渗情惜惭悼惧惕惊惨惯寇寄宿窑密谋谎祸谜逮敢屠弹随蛋隆隐婚婶颈绩绪续骑绳维绵绸绿琴斑替款堪搭塔越趁趋超提堤博揭喜插揪搜煮援裁搁搂搅握揉斯期欺联散惹葬葛董葡敬葱落朝辜葵棒棋植森椅椒棵棍棉棚棕惠惑逼厨厦硬确雁殖裂雄暂雅辈悲紫辉敞赏掌晴暑最量喷晶喇遇喊景践跌跑遗蛙蛛蜓喝喂喘喉幅帽赌赔黑铸铺链销锁锄锅锈锋锐短智毯鹅剩稍程稀税筐等筑策筛筒答筋筝傲傅牌堡集焦傍储奥街惩御循艇舒番释禽腊脾腔鲁猾猴然馋装蛮就痛童阔善羡普粪尊道曾焰港湖渣湿温渴滑湾渡游滋溉愤慌惰愧愉慨割寒富窜窝窗遍裕裤裙谢谣谦属屡强粥疏隔隙絮嫂登缎缓编骗缘瑞魂肆摄摸填搏塌鼓摆携搬摇搞塘摊蒜勤鹊蓝墓幕蓬蓄蒙蒸献禁楚想槐榆楼概赖酬感碍碑碎碰碗碌雷零雾雹输督龄鉴睛睡睬鄙愚暖盟歇暗照跨跳跪路跟遣蛾蜂嗓置罪罩错锡锣锤锦键锯矮辞稠愁筹签简毁舅鼠催傻像躲微愈遥腰腥腹腾腿触解酱痰廉新韵意粮数煎塑慈煤煌满漠源滤滥滔溪溜滚滨粱滩慎誉塞谨福群殿辟障嫌嫁叠缝缠静碧璃墙撇嘉摧截誓境摘摔聚蔽慕暮蔑模榴榜榨歌遭酷酿酸磁愿需弊裳颗嗽蜻蜡蝇蜘赚锹锻舞稳算箩管僚鼻魄貌膜膊膀鲜疑馒裹敲豪膏遮腐瘦辣竭端旗精歉熄熔漆漂漫滴演漏慢寨赛察蜜谱嫩翠熊凳骡缩慧撕撒趣趟撑播撞撤增聪鞋蕉蔬横槽樱橡飘醋醉震霉瞒题暴瞎影踢踏踩踪蝶蝴嘱墨镇靠稻黎稿稼箱箭篇僵躺僻德艘膝膛熟摩颜毅糊遵潜潮懂额慰劈操燕薯薪薄颠橘整融醒餐嘴蹄器赠默镜赞篮邀衡膨雕磨凝辨辩糖糕燃澡激懒壁避缴戴擦鞠藏霜霞瞧蹈螺穗繁辫赢糟糠燥臂翼骤鞭覆蹦镰翻鹰警攀蹲颤瓣爆疆壤耀躁嚼嚷籍魔灌蠢霸露囊罐匕刁丐歹戈夭仑讥冗邓艾夯凸卢叭叽皿凹囚矢乍尔冯玄邦迂邢芋芍吏夷吁吕吆屹廷迄臼仲伦伊肋旭匈凫妆亥汛讳讶讹讼诀弛阱驮驯纫玖玛韧抠扼汞扳抡坎坞抑拟抒芙芜苇芥芯芭杖杉巫杈甫匣轩卤肖吱吠呕呐吟呛吻吭邑囤吮岖牡佑佃伺囱肛肘甸狈鸠彤灸刨庇吝庐闰兑灼沐沛汰沥沦汹沧沪忱诅诈罕屁坠妓姊妒纬玫卦坷坯拓坪坤拄拧拂拙拇拗茉昔苛苫苟苞茁苔枉枢枚枫杭郁矾奈奄殴歧卓昙哎咕呵咙呻咒咆咖帕账贬贮氛秉岳侠侥侣侈卑刽刹肴觅忿瓮肮肪狞庞疟疙疚卒氓炬沽沮泣泞泌沼怔怯宠宛衩祈诡帚屉弧弥陋陌函姆虱叁绅驹绊绎契贰玷玲珊拭拷拱挟垢垛拯荆茸茬荚茵茴荞荠荤荧荔栈柑栅柠枷勃柬砂泵砚鸥轴韭虐昧盹咧昵昭盅勋哆咪哟幽钙钝钠钦钧钮毡氢秕俏俄俐侯徊衍胚胧胎狰饵峦奕咨飒闺闽籽娄烁炫洼柒涎洛恃恍恬恤宦诫诬祠诲屏屎逊陨姚娜蚤骇耘耙秦匿埂捂捍袁捌挫挚捣捅埃耿聂荸莽莱莉莹莺梆栖桦栓桅桩贾酌砸砰砾殉逞哮唠哺剔蚌蚜畔蚣蚪蚓哩圃鸯唁哼唆峭唧峻赂赃钾铆氨秫笆俺赁倔殷耸舀豺豹颁胯胰脐脓逛卿鸵鸳馁凌凄衷郭斋疹紊瓷羔烙浦涡涣涤涧涕涩悍悯窍诺诽袒谆祟恕娩骏琐麸琉琅措捺捶赦埠捻掐掂掖掷掸掺勘聊娶菱菲萎菩萤乾萧萨菇彬梗梧梭曹酝酗厢硅硕奢盔匾颅彪眶晤曼晦冕啡畦趾啃蛆蚯蛉蛀唬啰唾啤啥啸崎逻崔崩婴赊铐铛铝铡铣铭矫秸秽笙笤偎傀躯兜衅徘徙舶舷舵敛翎脯逸凰猖祭烹庶庵痊阎阐眷焊焕鸿涯淑淌淮淆渊淫淳淤淀涮涵惦悴惋寂窒谍谐裆袱祷谒谓谚尉堕隅婉颇绰绷综绽缀巢琳琢琼揍堰揩揽揖彭揣搀搓壹搔葫募蒋蒂韩棱椰焚椎棺榔椭粟棘酣酥硝硫颊雳翘凿棠晰鼎喳遏晾畴跋跛蛔蜒蛤鹃喻啼喧嵌赋赎赐锉锌甥掰氮氯黍筏牍粤逾腌腋腕猩猬惫敦痘痢痪竣翔奠遂焙滞湘渤渺溃溅湃愕惶寓窖窘雇谤犀隘媒媚婿缅缆缔缕骚瑟鹉瑰搪聘斟靴靶蓖蒿蒲蓉楔椿楷榄楞楣酪碘硼碉辐辑频睹睦瞄嗜嗦暇畸跷跺蜈蜗蜕蛹嗅嗡嗤署蜀幌锚锥锨锭锰稚颓筷魁衙腻腮腺鹏肄猿颖煞雏馍馏禀痹廓痴靖誊漓溢溯溶滓溺寞窥窟寝褂裸谬媳嫉缚缤剿赘熬赫蔫摹蔓蔗蔼熙蔚兢榛榕酵碟碴碱碳辕辖雌墅嘁踊蝉嘀幔镀舔熏箍箕箫舆僧孵瘩瘟彰粹漱漩漾慷寡寥谭褐褪隧嫡缨撵撩撮撬擒墩撰鞍蕊蕴樊樟橄敷豌醇磕磅碾憋嘶嘲嘹蝠蝎蝌蝗蝙嘿幢镊镐稽篓膘鲤鲫褒瘪瘤瘫凛澎潭潦澳潘澈澜澄憔懊憎翩褥谴鹤憨履嬉豫缭撼擂擅蕾薛薇擎翰噩橱橙瓢蟥霍霎辙冀踱蹂蟆螃螟噪鹦黔穆篡篷篙篱儒膳鲸瘾瘸糙燎濒憾懈窿缰壕藐檬檐檩檀礁磷瞭瞬瞳瞪曙蹋蟋蟀嚎赡镣魏簇儡徽爵朦臊鳄糜癌懦豁臀藕藤瞻嚣鳍癞瀑襟璧戳攒孽蘑藻鳖蹭蹬簸簿蟹靡癣羹鬓攘蠕巍鳞糯譬霹躏髓蘸镶瓤矗" - -# 
https://gist.github.com/simongfxu/13accd501f6c91e7a423ddc43e674c0f -common_traditional_chinese = "一乙二十丁廠七蔔人入八九幾兒了力乃刀又三於幹虧士工土才寸下大丈與萬上小口巾山千乞川億個勺久凡及夕丸麽廣亡門義之屍弓己已子衛也女飛刃習叉馬鄉豐王井開夫天無元專雲紮藝木五支廳不太犬区歷尤友匹車巨牙屯比互切瓦止少日中岡貝內水見午牛手毛氣升長仁什片仆化仇幣仍僅斤爪反介父從今兇分乏公倉月氏勿欠風丹勻烏鳳勾文六方火為鬥憶訂計戶認心尺引醜巴孔隊辦以允予勸雙書幻玉刊示末未擊打巧正撲扒功扔去甘世古節本術可丙左厲右石布龍平滅軋東卡北占業舊帥歸且旦目葉甲申叮电號田由史只央兄叼叫另叨嘆四生失禾丘付仗代仙們儀白仔他斥瓜乎叢令用甩印樂句匆冊犯外處冬鳥务包饑主市立閃蘭半汁匯頭漢寧穴它討寫讓禮訓必議訊記永司尼民出遼奶奴加召皮邊發孕聖對臺矛糾母幼絲式刑動扛寺吉扣考托老執鞏圾擴掃地揚場耳共芒亞芝朽樸機權過臣再協西壓厭在有百存而頁匠夸奪灰達列死成夾軌邪劃邁畢至此貞師塵尖劣光當早吐嚇蟲曲團同吊吃因吸嗎嶼帆歲回豈剛則肉網年朱先丟舌竹遷喬偉傳乒乓休伍伏優伐延件任傷價份華仰仿夥偽自血向似後行舟全會殺合兆企眾爺傘創肌朵雜危旬旨負各名多爭色壯沖冰莊慶亦劉齊交次衣產決充妄閉問闖羊並關米燈州汗汙江池湯忙興宇守宅字安講軍許論農諷設訪尋那迅盡導異孫陣陽收階陰防奸如婦好她媽戲羽觀歡買紅纖級約紀馳巡壽弄麥形進戒吞遠違運扶撫壇技壞擾拒找批扯址走抄壩貢攻赤折抓扮搶孝均拋投墳抗坑坊抖護殼誌扭塊声把報卻劫芽花芹芬蒼芳嚴蘆勞克蘇桿杠杜材村杏極李楊求更束豆兩麗醫辰勵否還殲來連步堅旱盯呈时吳助縣裏呆園曠圍呀噸足郵男困吵串員聽吩吹嗚吧吼別崗帳財針釘告我亂利禿秀私每兵估體何但伸作伯伶傭低你住位伴身皂佛近徹役返余希坐谷妥含鄰岔肝肚腸龜免狂猶角刪條卵島迎飯飲系言凍狀畝况床庫療應冷這序辛棄冶忘閑間悶判竈燦弟汪沙汽沃泛溝沒沈沈懷憂快完宋宏牢究窮災良證啟評補初社識訴診詞譯君靈即層尿尾遲局改張忌際陸阿陳阻附妙妖妨努忍勁雞驅純紗納綱駁縱紛紙紋紡驢紐奉玩環武青責現表規抹攏拔揀擔坦押抽拐拖拍者頂拆擁抵拘勢抱垃拉攔拌幸招坡披撥擇擡其取苦若茂蘋苗英範直茄莖茅林枝杯櫃析板松槍構傑述枕喪或畫臥事刺棗雨賣礦碼廁奔奇奮態歐壟妻轟頃轉斬輪軟到非叔肯齒些虎虜腎賢尚旺具果味昆國昌暢明易昂典固忠咐呼鳴詠呢岸巖帖羅幟嶺凱敗販購圖釣制知垂牧物乖刮稈和季委佳侍供使例版侄偵側憑僑佩貨依的迫質欣征往爬彼徑所舍金命斧爸采受乳貪念貧肤肺肢腫脹朋股肥服脅周昏魚兔狐忽狗備飾飽飼變京享店夜廟府底劑郊廢凈盲放刻育閘鬧鄭券卷單炒炊炕炎爐沫淺法泄河沾淚油泊沿泡註瀉泳泥沸波潑澤治怖性怕憐怪學寶宗定宜審宙官空簾實試郎詩肩房誠襯衫視話誕詢該詳建肅錄隸居屆刷屈弦承孟孤陜降限妹姑姐姓始駕參艱線練組細駛織終駐駝紹經贯奏春幫珍玻毒型掛封持項垮挎城撓政赴趙擋挺括拴拾挑指墊掙擠拼挖按揮挪某甚革薦巷帶草繭茶荒茫蕩榮故胡南藥標枯柄棟相查柏柳柱柿欄樹要鹹威歪研磚厘厚砌砍面耐耍牽殘殃輕鴉皆背戰點臨覽豎省削嘗是盼眨哄顯啞冒映星昨畏趴胃貴界虹蝦蟻思螞雖品咽罵嘩咱響哈咬咳哪炭峽罰賤貼骨鈔鐘鋼鑰钩卸缸拜看矩怎牲選適秒香種秋科重復竿段便倆貸順修保促侮儉俗俘信皇泉鬼侵追俊盾待律很須敘劍逃食盆膽勝胞胖脈勉狹獅獨狡獄狠貿怨急饒蝕餃餅彎將獎哀亭亮度跡庭瘡瘋疫疤姿親音帝施聞閥閣差养美姜叛送類迷前首逆總煉炸炮爛剃潔洪灑澆濁洞測洗活派洽染濟洋洲渾濃津恒恢恰惱恨舉覺宣室宮宪突穿竊客冠語扁襖祖神祝誤誘說誦墾退既屋晝費陡眉孩除險院娃姥姨姻嬌怒架賀盈勇怠柔壘綁絨結绕驕繪給絡駱絕絞統耕耗艷泰珠班素蠶頑盞匪撈栽捕振載趕起鹽捎捏埋捉捆捐損都哲逝撿換挽熱恐壺挨恥耽恭蓮莫荷獲晉惡真框桂檔桐株橋桃格校核樣根索哥速逗栗配翅辱唇夏礎破原套逐烈殊顧轎較頓毙致柴桌慮監緊黨曬眠曉鴨晃晌暈蚊哨哭恩喚啊唉罷峰圓賊賄錢鉗鉆鐵鈴鉛缺氧特犧造乘敵秤租積秧秩稱秘透筆笑筍債借值倚傾倒倘俱倡候俯倍倦健臭射躬息徒徐艦艙般航途拿爹愛頌翁脆脂胸胳臟膠腦狸狼逢留皺餓戀槳漿衰高席準座脊癥病疾疼疲效離唐資涼站剖競部旁旅畜閱羞瓶拳粉料益兼烤烘煩燒烛煙遞濤浙澇酒涉消浩海塗浴浮流潤浪浸漲燙湧悟悄悔悅害寬家宵宴賓窄容宰案請朗諸讀扇襪袖袍被祥課誰調冤諒談誼剝懇展劇屑弱陵陶陷陪娛娘通能難預桑絹繡驗繼球理捧堵描域掩捷排掉堆推掀授教掏掠培接控探據掘職基著勒黃萌蘿菌菜萄菊萍菠營械夢梢梅檢梳梯桶救副票戚爽聾襲盛雪輔輛虛雀堂常匙晨睜瞇眼懸野啦晚啄距躍略蛇累唱患唯崖嶄崇圈銅鏟銀甜梨犁移笨籠笛符第敏做袋悠償偶偷您售停偏假得銜盤船斜盒鴿悉欲彩領腳脖臉脫象夠猜豬獵貓猛餡館湊減毫麻癢痕廊康庸鹿盜章竟商族旋望率著蓋粘粗粒斷剪獸清添淋淹渠漸混漁淘液淡深婆梁滲情惜慚悼懼惕驚慘慣寇寄宿窯密謀謊禍謎逮敢屠彈隨蛋隆隱婚嬸頸績緒續騎繩維綿綢綠琴斑替款堪搭塔越趁趨超提堤博揭喜插揪搜煮援裁擱摟攪握揉斯期欺聯散惹葬葛董葡敬蔥落朝辜葵棒棋植森椅椒棵棍棉棚棕惠惑逼廚廈硬確雁殖裂雄暫雅輩悲紫辉敞賞掌晴暑最量噴晶喇遇喊景踐跌跑遺蛙蛛蜓喝餵喘喉幅帽賭賠黑鑄鋪鏈銷鎖鋤鍋銹鋒銳短智毯鵝剩稍程稀稅筐等築策篩筒答筋箏傲傅牌堡集焦傍儲奧街懲禦循艇舒番釋禽臘脾腔魯猾猴然饞裝蠻就痛童闊善羨普糞尊道曾焰港湖渣濕溫渴滑灣渡遊滋溉憤慌惰愧愉慨割寒富竄窩窗遍裕褲裙謝謠謙屬屢強粥疏隔隙絮嫂登緞緩編騙緣瑞魂肆攝摸填搏塌鼓擺攜搬搖搞塘攤蒜勤鵲藍墓幕蓬蓄蒙蒸獻禁楚想槐榆楼概賴酬感礙碑碎碰碗碌雷零霧雹輸督齡鑒睛睡睬鄙愚暖盟歇暗照跨跳跪路跟遣蛾蜂嗓置罪罩錯錫鑼锤錦鍵鋸矮辭稠愁籌簽簡毀舅鼠催傻像躲微愈遙腰腥腹騰腿觸解醬痰廉新韻意糧數煎塑慈煤煌滿漠源滤濫滔溪溜滾濱粱灘慎譽塞謹福群殿辟障嫌嫁叠縫纏靜碧璃墻撇嘉摧截誓境摘摔聚蔽慕暮蔑模榴榜榨歌遭酷釀酸磁願需弊裳顆嗽蜻蠟蠅蜘賺鍬鍛舞穩算籮管僚鼻魄貌膜膊膀鮮疑饅裹敲豪膏遮腐瘦辣竭端旗精歉熄熔漆漂漫滴演漏慢寨賽察蜜譜嫩翠熊凳騾縮慧撕撒趣趟撐播撞撤增聰鞋蕉蔬橫槽櫻橡飄醋醉震黴瞞題暴瞎影踢踏踩蹤蝶蝴囑墨鎮靠稻黎稿稼箱箭篇僵躺僻德艘膝膛熟摩顏毅糊遵潛潮懂額慰劈操燕薯薪薄顛橘整融醒餐嘴蹄器贈默鏡贊籃邀衡膨雕磨凝辨辯糖糕燃澡激懶壁避繳戴擦鞠藏霜霞瞧蹈螺穗繁辮贏糟糠燥臂翼驟鞭覆蹦鐮翻鷹警攀蹲顫瓣爆疆壤耀躁嚼嚷籍魔灌蠢霸露囊罐匕刁丐歹戈夭侖譏冗鄧艾夯凸盧叭嘰皿凹囚矢乍爾馮玄邦迂邢芋芍吏夷籲呂吆屹廷迄臼仲倫伊肋旭匈鳧妝亥汛諱訝訛訟诀弛阱馱馴紉玖瑪韌摳扼汞扳掄坎塢抑擬抒芙蕪葦芥芯芭杖杉巫杈甫匣軒鹵肖吱吠嘔吶吟嗆吻吭邑囤吮嶇牡佑佃伺囪肛肘甸狽鳩彤灸刨庇吝廬閏兌灼沐沛汰瀝淪洶滄滬忱詛詐罕屁墜妓姊妒緯玫卦坷坯拓坪坤拄擰拂拙拇拗茉昔苛苫茍苞茁苔枉樞枚楓杭郁礬奈奄毆歧卓曇哎咕呵嚨呻咒咆咖帕賬貶貯氛秉嶽侠僥侶侈卑劊剎肴覓忿甕骯肪獰龐瘧疙疚卒氓炬沽沮泣濘泌沼怔怯寵宛衩祈詭帚屜弧彌陋陌函姆虱叁绅駒絆繹契貳玷玲珊拭拷拱挾垢垛拯荊茸茬莢茵茴蕎薺葷熒荔棧柑柵檸枷勃柬砂泵硯鷗軸韭虐昧盹咧昵昭盅勛哆咪喲幽鈣鈍鈉欽鈞鈕氈氫秕俏俄俐侯徊衍胚朧胎猙餌巒奕咨颯閨閩籽婁爍炫窪柒涎洛恃恍恬恤宦誡誣祠誨屏屎遜隕姚娜蚤駭耘耙秦匿埂捂捍袁捌挫摯搗捅埃耿聶荸莽萊莉瑩鶯梆棲樺栓桅樁賈酌砸砰礫殉逞哮嘮哺剔蚌蚜畔蚣蚪蚓哩圃鴦唁哼唆峭唧峻賂贓鉀鉚氨秫笆俺賃倔殷聳舀豺豹頒胯胰臍脓逛卿鴕鴛餒淩淒衷郭齋疹紊瓷羔烙浦渦渙滌澗涕澀悍憫竅諾誹袒諄祟恕娩駿瑣麩琉瑯措捺捶赦埠撚掐掂掖擲撣摻勘聊娶菱菲萎菩螢乾蕭薩菇彬梗梧梭曹醞酗廂矽碩奢盔匾顱彪眶晤曼晦冕啡畦趾啃蛆蚯蛉蛀唬啰唾啤啥嘯崎邏崔崩嬰賒銬鐺鋁鍘銑銘矯稭穢笙笤偎傀軀兜釁徘徙舶舷舵斂翎脯逸凰猖祭烹庶庵痊閻闡眷焊煥鴻涯淑淌淮淆淵淫淳淤澱涮涵惦悴惋寂窒諜諧襠袱禱謁謂諺尉墮隅婉頗綽繃綜綻綴巢琳琢瓊揍堰揩攬揖彭揣攙搓壹搔葫募蔣蒂韓棱椰焚椎棺榔橢粟棘酣酥硝硫頰靂翹鑿棠晰鼎喳遏晾疇跋跛蛔蜒蛤鵑喻啼喧嵌賦贖賜銼鋅甥掰氮氯黍筏牘粵逾腌腋腕猩猬憊敦痘痢瘓竣翔奠遂焙滯湘渤渺潰濺湃愕惶寓窖窘雇謗犀隘媒媚婿緬纜締縷騷瑟鵡瑰搪聘斟靴靶蓖蒿蒲蓉楔椿楷欖楞楣酪碘硼碉輻輯頻睹睦瞄嗜嗦暇畸蹺跺蜈蝸蛻蛹嗅嗡嗤署蜀幌錨錐鍁錠錳稚頹筷魁衙膩腮腺鵬肄猿穎煞雛饃餾稟痹廓癡靖誊漓溢溯溶滓溺寞窺窟寢褂裸謬媳嫉縛繽剿贅熬赫蔫摹蔓蔗藹熙蔚兢榛榕酵碟碴堿碳轅轄雌墅嘁踴蟬嘀幔鍍舔熏箍箕簫輿僧孵瘩瘟彰粹漱漩漾慷寡寥譚褐褪隧嫡纓攆撩撮撬擒墩撰鞍蕊蘊樊樟橄敷豌醇磕磅碾憋嘶嘲嘹蝠蠍蝌蝗蝙嘿幢鑷鎬稽簍膘鯉鯽褒癟瘤癱凜澎潭潦澳潘澈瀾澄憔懊憎翩褥譴鶴憨履嬉豫缭撼擂擅蕾薛薇擎翰噩櫥橙瓢蟥霍霎轍冀踱蹂蟆螃螟噪鸚黔穆篡篷篙籬儒膳鯨癮瘸糙燎瀕憾懈窿韁壕藐檬檐檁檀礁磷瞭瞬瞳瞪曙蹋蟋蟀嚎贍鐐魏簇儡徽爵朦臊
鱷糜癌懦豁臀藕藤瞻囂鰭癩瀑襟璧戳攢孽蘑藻鱉蹭蹬簸簿蟹靡癬羹鬢攘蠕巍鱗糯譬霹躪髓蘸鑲瓤矗" - -korean_alphabet = "가각갂갃간갅갆갇갈갉갊갋갌갍갎갏감갑값갓갔강갖갗갘같갚갛개객갞갟갠갡갢갣갤갥갦갧갨갩갪갫갬갭갮갯갰갱갲갳갴갵갶갷갸갹갺갻갼갽갾갿걀걁걂걃걄걅걆걇걈걉걊걋걌걍걎걏걐걑걒걓걔걕걖걗걘걙걚걛걜걝걞걟걠걡걢걣걤걥걦걧걨걩걪걫걬걭걮걯거걱걲걳건걵걶걷걸걹걺걻걼걽걾걿검겁겂것겄겅겆겇겈겉겊겋게겍겎겏겐겑겒겓겔겕겖겗겘겙겚겛겜겝겞겟겠겡겢겣겤겥겦겧겨격겪겫견겭겮겯결겱겲겳겴겵겶겷겸겹겺겻겼경겾겿곀곁곂곃계곅곆곇곈곉곊곋곌곍곎곏곐곑곒곓곔곕곖곗곘곙곚곛곜곝곞곟고곡곢곣곤곥곦곧골곩곪곫곬곭곮곯곰곱곲곳곴공곶곷곸곹곺곻과곽곾곿관괁괂괃괄괅괆괇괈괉괊괋괌괍괎괏괐광괒괓괔괕괖괗괘괙괚괛괜괝괞괟괠괡괢괣괤괥괦괧괨괩괪괫괬괭괮괯괰괱괲괳괴괵괶괷괸괹괺괻괼괽괾괿굀굁굂굃굄굅굆굇굈굉굊굋굌굍굎굏교굑굒굓굔굕굖굗굘굙굚굛굜굝굞굟굠굡굢굣굤굥굦굧굨굩굪굫구국굮굯군굱굲굳굴굵굶굷굸굹굺굻굼굽굾굿궀궁궂궃궄궅궆궇궈궉궊궋권궍궎궏궐궑궒궓궔궕궖궗궘궙궚궛궜궝궞궟궠궡궢궣궤궥궦궧궨궩궪궫궬궭궮궯궰궱궲궳궴궵궶궷궸궹궺궻궼궽궾궿귀귁귂귃귄귅귆귇귈귉귊귋귌귍귎귏귐귑귒귓귔귕귖귗귘귙귚귛규귝귞귟균귡귢귣귤귥귦귧귨귩귪귫귬귭귮귯귰귱귲귳귴귵귶귷그극귺귻근귽귾귿글긁긂긃긄긅긆긇금급긊긋긌긍긎긏긐긑긒긓긔긕긖긗긘긙긚긛긜긝긞긟긠긡긢긣긤긥긦긧긨긩긪긫긬긭긮긯기긱긲긳긴긵긶긷길긹긺긻긼긽긾긿김깁깂깃깄깅깆깇깈깉깊깋까깍깎깏깐깑깒깓깔깕깖깗깘깙깚깛깜깝깞깟깠깡깢깣깤깥깦깧깨깩깪깫깬깭깮깯깰깱깲깳깴깵깶깷깸깹깺깻깼깽깾깿꺀꺁꺂꺃꺄꺅꺆꺇꺈꺉꺊꺋꺌꺍꺎꺏꺐꺑꺒꺓꺔꺕꺖꺗꺘꺙꺚꺛꺜꺝꺞꺟꺠꺡꺢꺣꺤꺥꺦꺧꺨꺩꺪꺫꺬꺭꺮꺯꺰꺱꺲꺳꺴꺵꺶꺷꺸꺹꺺꺻꺼꺽꺾꺿껀껁껂껃껄껅껆껇껈껉껊껋껌껍껎껏껐껑껒껓껔껕껖껗께껙껚껛껜껝껞껟껠껡껢껣껤껥껦껧껨껩껪껫껬껭껮껯껰껱껲껳껴껵껶껷껸껹껺껻껼껽껾껿꼀꼁꼂꼃꼄꼅꼆꼇꼈꼉꼊꼋꼌꼍꼎꼏꼐꼑꼒꼓꼔꼕꼖꼗꼘꼙꼚꼛꼜꼝꼞꼟꼠꼡꼢꼣꼤꼥꼦꼧꼨꼩꼪꼫꼬꼭꼮꼯꼰꼱꼲꼳꼴꼵꼶꼷꼸꼹꼺꼻꼼꼽꼾꼿꽀꽁꽂꽃꽄꽅꽆꽇꽈꽉꽊꽋꽌꽍꽎꽏꽐꽑꽒꽓꽔꽕꽖꽗꽘꽙꽚꽛꽜꽝꽞꽟꽠꽡꽢꽣꽤꽥꽦꽧꽨꽩꽪꽫꽬꽭꽮꽯꽰꽱꽲꽳꽴꽵꽶꽷꽸꽹꽺꽻꽼꽽꽾꽿꾀꾁꾂꾃꾄꾅꾆꾇꾈꾉꾊꾋꾌꾍꾎꾏꾐꾑꾒꾓꾔꾕꾖꾗꾘꾙꾚꾛꾜꾝꾞꾟꾠꾡꾢꾣꾤꾥꾦꾧꾨꾩꾪꾫꾬꾭꾮꾯꾰꾱꾲꾳꾴꾵꾶꾷꾸꾹꾺꾻꾼꾽꾾꾿꿀꿁꿂꿃꿄꿅꿆꿇꿈꿉꿊꿋꿌꿍꿎꿏꿐꿑꿒꿓꿔꿕꿖꿗꿘꿙꿚꿛꿜꿝꿞꿟꿠꿡꿢꿣꿤꿥꿦꿧꿨꿩꿪꿫꿬꿭꿮꿯꿰꿱꿲꿳꿴꿵꿶꿷꿸꿹꿺꿻꿼꿽꿾꿿뀀뀁뀂뀃뀄뀅뀆뀇뀈뀉뀊뀋뀌뀍뀎뀏뀐뀑뀒뀓뀔뀕뀖뀗뀘뀙뀚뀛뀜뀝뀞뀟뀠뀡뀢뀣뀤뀥뀦뀧뀨뀩뀪뀫뀬뀭뀮뀯뀰뀱뀲뀳뀴뀵뀶뀷뀸뀹뀺뀻뀼뀽뀾뀿끀끁끂끃끄끅끆끇끈끉끊끋끌끍끎끏끐끑끒끓끔끕끖끗끘끙끚끛끜끝끞끟끠끡끢끣끤끥끦끧끨끩끪끫끬끭끮끯끰끱끲끳끴끵끶끷끸끹끺끻끼끽끾끿낀낁낂낃낄낅낆낇낈낉낊낋낌낍낎낏낐낑낒낓낔낕낖낗나낙낚낛난낝낞낟날낡낢낣낤낥낦낧남납낪낫났낭낮낯낰낱낲낳내낵낶낷낸낹낺낻낼낽낾낿냀냁냂냃냄냅냆냇냈냉냊냋냌냍냎냏냐냑냒냓냔냕냖냗냘냙냚냛냜냝냞냟냠냡냢냣냤냥냦냧냨냩냪냫냬냭냮냯냰냱냲냳냴냵냶냷냸냹냺냻냼냽냾냿넀넁넂넃넄넅넆넇너넉넊넋넌넍넎넏널넑넒넓넔넕넖넗넘넙넚넛넜넝넞넟넠넡넢넣네넥넦넧넨넩넪넫넬넭넮넯넰넱넲넳넴넵넶넷넸넹넺넻넼넽넾넿녀녁녂녃년녅녆녇녈녉녊녋녌녍녎녏념녑녒녓녔녕녖녗녘녙녚녛녜녝녞녟녠녡녢녣녤녥녦녧녨녩녪녫녬녭녮녯녰녱녲녳녴녵녶녷노녹녺녻논녽녾녿놀놁놂놃놄놅놆놇놈놉놊놋놌농놎놏놐놑높놓놔놕놖놗놘놙놚놛놜놝놞놟놠놡놢놣놤놥놦놧놨놩놪놫놬놭놮놯놰놱놲놳놴놵놶놷놸놹놺놻놼놽놾놿뇀뇁뇂뇃뇄뇅뇆뇇뇈뇉뇊뇋뇌뇍뇎뇏뇐뇑뇒뇓뇔뇕뇖뇗뇘뇙뇚뇛뇜뇝뇞뇟뇠뇡뇢뇣뇤뇥뇦뇧뇨뇩뇪뇫뇬뇭뇮뇯뇰뇱뇲뇳뇴뇵뇶뇷뇸뇹뇺뇻뇼뇽뇾뇿눀눁눂눃누눅눆눇눈눉눊눋눌눍눎눏눐눑눒눓눔눕눖눗눘눙눚눛눜눝눞눟눠눡눢눣눤눥눦눧눨눩눪눫눬눭눮눯눰눱눲눳눴눵눶눷눸눹눺눻눼눽눾눿뉀뉁뉂뉃뉄뉅뉆뉇뉈뉉뉊뉋뉌뉍뉎뉏뉐뉑뉒뉓뉔뉕뉖뉗뉘뉙뉚뉛뉜뉝뉞뉟뉠뉡뉢뉣뉤뉥뉦뉧뉨뉩뉪뉫뉬뉭뉮뉯뉰뉱뉲뉳뉴뉵뉶뉷뉸뉹뉺뉻뉼뉽뉾뉿늀늁늂늃늄늅늆늇늈늉늊늋늌늍늎늏느늑늒늓는늕늖늗늘늙늚늛늜늝늞늟늠늡늢늣늤능늦늧늨늩늪늫늬늭늮늯늰늱늲늳늴늵늶늷늸늹늺늻늼늽늾늿닀닁닂닃닄닅닆닇니닉닊닋닌닍닎닏닐닑닒닓닔닕닖닗님닙닚닛닜닝닞닟닠닡닢닣다닥닦닧단닩닪닫달닭닮닯닰닱닲닳담답닶닷닸당닺닻닼닽닾닿대댁댂댃댄댅댆댇댈댉댊댋댌댍댎댏댐댑댒댓댔댕댖댗댘댙댚댛댜댝댞댟댠댡댢댣댤댥댦댧댨댩댪댫댬댭댮댯댰댱댲댳댴댵댶댷댸댹댺댻댼댽댾댿덀덁덂덃덄덅덆덇덈덉덊덋덌덍덎덏덐덑덒덓더덕덖덗던덙덚덛덜덝덞덟덠덡덢덣덤덥덦덧덨덩덪덫덬덭덮덯데덱덲덳덴덵덶덷델덹덺덻덼덽덾덿뎀뎁뎂뎃뎄뎅뎆뎇뎈뎉뎊뎋뎌뎍뎎뎏뎐뎑뎒뎓뎔뎕뎖뎗뎘뎙뎚뎛뎜뎝뎞뎟뎠뎡뎢뎣뎤뎥뎦뎧뎨뎩뎪뎫뎬뎭뎮뎯뎰뎱뎲뎳뎴뎵뎶뎷뎸뎹뎺뎻뎼뎽뎾뎿돀돁돂돃도독돆돇돈돉돊돋돌돍돎돏돐돑돒돓돔돕돖돗돘동돚돛돜돝돞돟돠돡돢돣돤돥돦돧돨돩돪돫돬돭돮돯돰돱돲돳돴돵돶돷돸돹돺돻돼돽돾돿됀됁됂됃됄됅됆됇됈됉됊됋됌됍됎됏됐됑됒됓됔됕됖됗되됙됚됛된됝됞됟될됡됢됣됤됥됦됧됨됩됪됫됬됭됮됯됰됱됲됳됴됵됶됷됸됹됺됻됼됽됾됿둀둁둂둃둄둅둆둇둈둉둊둋둌둍둎둏두둑둒둓둔둕둖둗둘둙둚둛둜둝둞둟둠둡둢둣둤둥둦둧둨둩둪둫둬둭둮둯둰둱둲둳둴둵둶둷둸둹둺둻둼둽둾둿뒀뒁뒂뒃뒄뒅뒆뒇뒈뒉뒊뒋뒌뒍뒎뒏뒐뒑뒒뒓뒔뒕뒖뒗뒘뒙뒚뒛뒜뒝뒞뒟뒠뒡뒢뒣뒤뒥뒦뒧뒨뒩뒪뒫뒬뒭뒮뒯뒰뒱뒲뒳뒴뒵뒶뒷뒸뒹뒺뒻뒼뒽뒾뒿듀듁듂듃듄듅듆듇듈듉듊듋듌듍듎듏듐듑듒듓듔듕듖듗듘듙듚듛드득듞듟든듡듢듣들듥듦듧듨듩듪듫듬듭듮듯듰등듲듳듴듵듶듷듸듹듺듻듼듽듾듿딀딁딂딃딄딅딆딇딈딉딊딋딌딍딎딏딐딑딒딓디딕딖딗딘딙딚딛딜딝딞딟딠딡딢딣딤딥딦딧딨딩딪딫딬딭딮딯따딱딲딳딴딵딶딷딸딹딺딻딼딽딾딿땀땁땂땃땄땅땆땇땈땉땊땋때땍땎땏땐땑땒땓땔땕땖땗땘땙땚땛땜땝땞땟땠땡땢땣땤땥땦땧땨땩땪땫땬땭땮땯땰땱땲땳땴땵땶땷땸땹땺땻땼땽땾땿떀떁떂떃떄떅떆떇떈떉떊떋떌떍떎떏떐떑떒떓떔떕떖떗떘떙떚떛떜떝떞떟떠떡떢떣떤떥떦떧떨떩떪떫떬떭떮떯떰떱떲떳떴떵떶떷떸떹떺떻떼떽떾떿뗀뗁뗂뗃뗄뗅뗆뗇뗈뗉뗊뗋뗌뗍뗎뗏뗐뗑뗒뗓뗔뗕뗖뗗뗘뗙뗚뗛뗜뗝뗞뗟뗠뗡뗢뗣뗤뗥뗦뗧뗨뗩뗪뗫뗬뗭뗮뗯뗰뗱뗲뗳뗴뗵뗶뗷뗸뗹뗺뗻뗼뗽뗾뗿똀똁똂똃똄똅똆똇똈똉똊똋똌똍똎똏또똑똒똓똔똕똖똗똘똙똚똛똜똝똞똟똠똡똢똣똤똥똦똧똨똩똪똫똬똭똮똯똰똱똲똳똴똵똶똷똸똹똺똻똼똽똾똿뙀뙁뙂뙃뙄뙅뙆뙇뙈뙉뙊뙋뙌뙍뙎뙏뙐뙑뙒뙓뙔뙕뙖뙗뙘뙙뙚뙛뙜뙝뙞뙟뙠뙡뙢뙣뙤뙥뙦뙧뙨뙩뙪뙫뙬뙭뙮뙯뙰뙱뙲뙳뙴뙵뙶뙷뙸뙹뙺뙻뙼뙽뙾뙿뚀뚁뚂뚃뚄뚅뚆뚇뚈뚉뚊뚋뚌뚍뚎뚏뚐뚑뚒뚓뚔뚕뚖뚗뚘뚙뚚뚛뚜뚝뚞뚟뚠뚡뚢뚣뚤뚥뚦뚧뚨뚩뚪뚫뚬뚭뚮뚯뚰뚱뚲뚳뚴뚵뚶뚷뚸뚹뚺뚻뚼뚽뚾뚿뛀뛁뛂뛃뛄뛅뛆뛇뛈뛉뛊뛋뛌뛍뛎뛏뛐뛑뛒뛓뛔뛕뛖뛗뛘뛙뛚뛛뛜뛝뛞뛟뛠뛡뛢뛣뛤뛥뛦뛧뛨뛩뛪뛫뛬뛭뛮뛯뛰뛱뛲뛳뛴뛵뛶뛷뛸뛹뛺뛻뛼뛽뛾뛿뜀뜁뜂뜃뜄뜅뜆뜇뜈뜉뜊뜋뜌뜍뜎뜏뜐뜑뜒뜓뜔뜕뜖뜗뜘뜙뜚뜛뜜뜝뜞뜟뜠뜡뜢뜣뜤뜥뜦뜧뜨뜩뜪뜫뜬뜭뜮뜯뜰뜱뜲뜳뜴뜵뜶뜷뜸뜹뜺뜻뜼뜽뜾뜿띀띁띂띃띄띅띆띇띈띉띊띋띌띍띎띏띐띑띒띓띔띕띖띗띘띙띚띛띜띝띞띟띠띡띢띣띤띥띦띧띨띩띪띫띬띭띮띯띰띱띲띳띴띵띶띷띸띹띺띻라락띾띿란랁랂랃랄랅랆랇랈랉랊랋람랍랎랏랐랑랒랓랔랕랖랗래랙랚랛랜랝랞랟랠랡랢랣랤랥랦랧램랩랪랫랬랭랮랯랰랱랲랳랴략랶랷랸랹랺랻랼랽랾랿럀럁럂럃럄럅럆럇럈량럊럋럌럍럎럏럐럑럒럓럔럕럖럗럘럙럚럛럜럝럞럟럠럡럢럣럤럥럦럧럨럩럪럫러럭럮럯런럱럲럳럴럵럶럷럸럹럺럻럼럽럾럿렀렁렂렃렄렅렆렇레렉렊렋렌렍렎렏렐렑렒렓렔렕렖렗렘렙렚렛렜렝렞렟렠렡렢렣려력렦렧련렩렪렫렬렭렮렯렰렱렲렳렴렵렶렷렸령렺렻렼렽렾렿례롁롂롃롄롅롆롇롈롉롊롋롌롍롎롏롐롑롒롓롔롕롖롗롘롙롚롛로록롞롟론롡롢롣롤롥롦롧롨롩롪롫롬롭롮롯롰롱롲롳롴롵롶롷롸롹롺롻롼롽롾롿뢀뢁뢂뢃뢄뢅뢆뢇뢈뢉뢊뢋뢌뢍뢎뢏뢐뢑뢒뢓뢔뢕뢖뢗뢘뢙뢚뢛뢜뢝뢞뢟뢠뢡뢢뢣뢤뢥뢦뢧뢨뢩뢪뢫뢬뢭뢮뢯뢰뢱뢲뢳뢴뢵뢶뢷뢸뢹뢺뢻뢼뢽뢾뢿룀룁룂룃룄룅룆룇룈룉룊룋료룍룎룏룐룑룒룓룔룕룖룗룘룙룚룛룜룝룞룟룠룡룢룣룤룥룦룧루룩룪룫룬룭룮룯룰룱룲룳룴룵룶룷룸룹룺룻룼룽룾룿뤀뤁뤂뤃뤄뤅뤆뤇뤈뤉뤊뤋뤌뤍뤎뤏뤐뤑뤒뤓뤔뤕뤖뤗뤘뤙뤚뤛뤜뤝뤞뤟뤠뤡뤢뤣뤤뤥뤦뤧뤨뤩뤪뤫뤬뤭뤮뤯뤰뤱뤲뤳뤴뤵뤶뤷뤸뤹뤺뤻뤼뤽뤾뤿륀륁륂륃륄륅륆륇륈륉륊륋륌륍륎륏륐륑륒륓륔륕륖륗류륙륚륛륜륝륞륟률륡륢륣륤륥륦륧륨륩륪륫륬륭륮륯륰륱륲륳르륵륶륷른륹륺륻를륽륾륿릀릁릂릃름릅릆릇릈릉릊릋릌릍릎릏릐릑릒릓릔릕릖릗릘릙릚릛릜릝릞릟
릠릡릢릣릤릥릦릧릨릩릪릫리릭릮릯린릱릲릳릴릵릶릷릸릹릺릻림립릾릿맀링맂맃맄맅맆맇마막맊맋만맍많맏말맑맒맓맔맕맖맗맘맙맚맛맜망맞맟맠맡맢맣매맥맦맧맨맩맪맫맬맭맮맯맰맱맲맳맴맵맶맷맸맹맺맻맼맽맾맿먀먁먂먃먄먅먆먇먈먉먊먋먌먍먎먏먐먑먒먓먔먕먖먗먘먙먚먛먜먝먞먟먠먡먢먣먤먥먦먧먨먩먪먫먬먭먮먯먰먱먲먳먴먵먶먷머먹먺먻먼먽먾먿멀멁멂멃멄멅멆멇멈멉멊멋멌멍멎멏멐멑멒멓메멕멖멗멘멙멚멛멜멝멞멟멠멡멢멣멤멥멦멧멨멩멪멫멬멭멮멯며멱멲멳면멵멶멷멸멹멺멻멼멽멾멿몀몁몂몃몄명몆몇몈몉몊몋몌몍몎몏몐몑몒몓몔몕몖몗몘몙몚몛몜몝몞몟몠몡몢몣몤몥몦몧모목몪몫몬몭몮몯몰몱몲몳몴몵몶몷몸몹몺못몼몽몾몿뫀뫁뫂뫃뫄뫅뫆뫇뫈뫉뫊뫋뫌뫍뫎뫏뫐뫑뫒뫓뫔뫕뫖뫗뫘뫙뫚뫛뫜뫝뫞뫟뫠뫡뫢뫣뫤뫥뫦뫧뫨뫩뫪뫫뫬뫭뫮뫯뫰뫱뫲뫳뫴뫵뫶뫷뫸뫹뫺뫻뫼뫽뫾뫿묀묁묂묃묄묅묆묇묈묉묊묋묌묍묎묏묐묑묒묓묔묕묖묗묘묙묚묛묜묝묞묟묠묡묢묣묤묥묦묧묨묩묪묫묬묭묮묯묰묱묲묳무묵묶묷문묹묺묻물묽묾묿뭀뭁뭂뭃뭄뭅뭆뭇뭈뭉뭊뭋뭌뭍뭎뭏뭐뭑뭒뭓뭔뭕뭖뭗뭘뭙뭚뭛뭜뭝뭞뭟뭠뭡뭢뭣뭤뭥뭦뭧뭨뭩뭪뭫뭬뭭뭮뭯뭰뭱뭲뭳뭴뭵뭶뭷뭸뭹뭺뭻뭼뭽뭾뭿뮀뮁뮂뮃뮄뮅뮆뮇뮈뮉뮊뮋뮌뮍뮎뮏뮐뮑뮒뮓뮔뮕뮖뮗뮘뮙뮚뮛뮜뮝뮞뮟뮠뮡뮢뮣뮤뮥뮦뮧뮨뮩뮪뮫뮬뮭뮮뮯뮰뮱뮲뮳뮴뮵뮶뮷뮸뮹뮺뮻뮼뮽뮾뮿므믁믂믃믄믅믆믇믈믉믊믋믌믍믎믏믐믑믒믓믔믕믖믗믘믙믚믛믜믝믞믟믠믡믢믣믤믥믦믧믨믩믪믫믬믭믮믯믰믱믲믳믴믵믶믷미믹믺믻민믽믾믿밀밁밂밃밄밅밆밇밈밉밊밋밌밍밎및밐밑밒밓바박밖밗반밙밚받발밝밞밟밠밡밢밣밤밥밦밧밨방밪밫밬밭밮밯배백밲밳밴밵밶밷밸밹밺밻밼밽밾밿뱀뱁뱂뱃뱄뱅뱆뱇뱈뱉뱊뱋뱌뱍뱎뱏뱐뱑뱒뱓뱔뱕뱖뱗뱘뱙뱚뱛뱜뱝뱞뱟뱠뱡뱢뱣뱤뱥뱦뱧뱨뱩뱪뱫뱬뱭뱮뱯뱰뱱뱲뱳뱴뱵뱶뱷뱸뱹뱺뱻뱼뱽뱾뱿벀벁벂벃버벅벆벇번벉벊벋벌벍벎벏벐벑벒벓범법벖벗벘벙벚벛벜벝벞벟베벡벢벣벤벥벦벧벨벩벪벫벬벭벮벯벰벱벲벳벴벵벶벷벸벹벺벻벼벽벾벿변볁볂볃별볅볆볇볈볉볊볋볌볍볎볏볐병볒볓볔볕볖볗볘볙볚볛볜볝볞볟볠볡볢볣볤볥볦볧볨볩볪볫볬볭볮볯볰볱볲볳보복볶볷본볹볺볻볼볽볾볿봀봁봂봃봄봅봆봇봈봉봊봋봌봍봎봏봐봑봒봓봔봕봖봗봘봙봚봛봜봝봞봟봠봡봢봣봤봥봦봧봨봩봪봫봬봭봮봯봰봱봲봳봴봵봶봷봸봹봺봻봼봽봾봿뵀뵁뵂뵃뵄뵅뵆뵇뵈뵉뵊뵋뵌뵍뵎뵏뵐뵑뵒뵓뵔뵕뵖뵗뵘뵙뵚뵛뵜뵝뵞뵟뵠뵡뵢뵣뵤뵥뵦뵧뵨뵩뵪뵫뵬뵭뵮뵯뵰뵱뵲뵳뵴뵵뵶뵷뵸뵹뵺뵻뵼뵽뵾뵿부북붂붃분붅붆붇불붉붊붋붌붍붎붏붐붑붒붓붔붕붖붗붘붙붚붛붜붝붞붟붠붡붢붣붤붥붦붧붨붩붪붫붬붭붮붯붰붱붲붳붴붵붶붷붸붹붺붻붼붽붾붿뷀뷁뷂뷃뷄뷅뷆뷇뷈뷉뷊뷋뷌뷍뷎뷏뷐뷑뷒뷓뷔뷕뷖뷗뷘뷙뷚뷛뷜뷝뷞뷟뷠뷡뷢뷣뷤뷥뷦뷧뷨뷩뷪뷫뷬뷭뷮뷯뷰뷱뷲뷳뷴뷵뷶뷷뷸뷹뷺뷻뷼뷽뷾뷿븀븁븂븃븄븅븆븇븈븉븊븋브븍븎븏븐븑븒븓블븕븖븗븘븙븚븛븜븝븞븟븠븡븢븣븤븥븦븧븨븩븪븫븬븭븮븯븰븱븲븳븴븵븶븷븸븹븺븻븼븽븾븿빀빁빂빃비빅빆빇빈빉빊빋빌빍빎빏빐빑빒빓빔빕빖빗빘빙빚빛빜빝빞빟빠빡빢빣빤빥빦빧빨빩빪빫빬빭빮빯빰빱빲빳빴빵빶빷빸빹빺빻빼빽빾빿뺀뺁뺂뺃뺄뺅뺆뺇뺈뺉뺊뺋뺌뺍뺎뺏뺐뺑뺒뺓뺔뺕뺖뺗뺘뺙뺚뺛뺜뺝뺞뺟뺠뺡뺢뺣뺤뺥뺦뺧뺨뺩뺪뺫뺬뺭뺮뺯뺰뺱뺲뺳뺴뺵뺶뺷뺸뺹뺺뺻뺼뺽뺾뺿뻀뻁뻂뻃뻄뻅뻆뻇뻈뻉뻊뻋뻌뻍뻎뻏뻐뻑뻒뻓뻔뻕뻖뻗뻘뻙뻚뻛뻜뻝뻞뻟뻠뻡뻢뻣뻤뻥뻦뻧뻨뻩뻪뻫뻬뻭뻮뻯뻰뻱뻲뻳뻴뻵뻶뻷뻸뻹뻺뻻뻼뻽뻾뻿뼀뼁뼂뼃뼄뼅뼆뼇뼈뼉뼊뼋뼌뼍뼎뼏뼐뼑뼒뼓뼔뼕뼖뼗뼘뼙뼚뼛뼜뼝뼞뼟뼠뼡뼢뼣뼤뼥뼦뼧뼨뼩뼪뼫뼬뼭뼮뼯뼰뼱뼲뼳뼴뼵뼶뼷뼸뼹뼺뼻뼼뼽뼾뼿뽀뽁뽂뽃뽄뽅뽆뽇뽈뽉뽊뽋뽌뽍뽎뽏뽐뽑뽒뽓뽔뽕뽖뽗뽘뽙뽚뽛뽜뽝뽞뽟뽠뽡뽢뽣뽤뽥뽦뽧뽨뽩뽪뽫뽬뽭뽮뽯뽰뽱뽲뽳뽴뽵뽶뽷뽸뽹뽺뽻뽼뽽뽾뽿뾀뾁뾂뾃뾄뾅뾆뾇뾈뾉뾊뾋뾌뾍뾎뾏뾐뾑뾒뾓뾔뾕뾖뾗뾘뾙뾚뾛뾜뾝뾞뾟뾠뾡뾢뾣뾤뾥뾦뾧뾨뾩뾪뾫뾬뾭뾮뾯뾰뾱뾲뾳뾴뾵뾶뾷뾸뾹뾺뾻뾼뾽뾾뾿뿀뿁뿂뿃뿄뿅뿆뿇뿈뿉뿊뿋뿌뿍뿎뿏뿐뿑뿒뿓뿔뿕뿖뿗뿘뿙뿚뿛뿜뿝뿞뿟뿠뿡뿢뿣뿤뿥뿦뿧뿨뿩뿪뿫뿬뿭뿮뿯뿰뿱뿲뿳뿴뿵뿶뿷뿸뿹뿺뿻뿼뿽뿾뿿쀀쀁쀂쀃쀄쀅쀆쀇쀈쀉쀊쀋쀌쀍쀎쀏쀐쀑쀒쀓쀔쀕쀖쀗쀘쀙쀚쀛쀜쀝쀞쀟쀠쀡쀢쀣쀤쀥쀦쀧쀨쀩쀪쀫쀬쀭쀮쀯쀰쀱쀲쀳쀴쀵쀶쀷쀸쀹쀺쀻쀼쀽쀾쀿쁀쁁쁂쁃쁄쁅쁆쁇쁈쁉쁊쁋쁌쁍쁎쁏쁐쁑쁒쁓쁔쁕쁖쁗쁘쁙쁚쁛쁜쁝쁞쁟쁠쁡쁢쁣쁤쁥쁦쁧쁨쁩쁪쁫쁬쁭쁮쁯쁰쁱쁲쁳쁴쁵쁶쁷쁸쁹쁺쁻쁼쁽쁾쁿삀삁삂삃삄삅삆삇삈삉삊삋삌삍삎삏삐삑삒삓삔삕삖삗삘삙삚삛삜삝삞삟삠삡삢삣삤삥삦삧삨삩삪삫사삭삮삯산삱삲삳살삵삶삷삸삹삺삻삼삽삾삿샀상샂샃샄샅샆샇새색샊샋샌샍샎샏샐샑샒샓샔샕샖샗샘샙샚샛샜생샞샟샠샡샢샣샤샥샦샧샨샩샪샫샬샭샮샯샰샱샲샳샴샵샶샷샸샹샺샻샼샽샾샿섀섁섂섃섄섅섆섇섈섉섊섋섌섍섎섏섐섑섒섓섔섕섖섗섘섙섚섛서석섞섟선섡섢섣설섥섦섧섨섩섪섫섬섭섮섯섰성섲섳섴섵섶섷세섹섺섻센섽섾섿셀셁셂셃셄셅셆셇셈셉셊셋셌셍셎셏셐셑셒셓셔셕셖셗션셙셚셛셜셝셞셟셠셡셢셣셤셥셦셧셨셩셪셫셬셭셮셯셰셱셲셳셴셵셶셷셸셹셺셻셼셽셾셿솀솁솂솃솄솅솆솇솈솉솊솋소속솎솏손솑솒솓솔솕솖솗솘솙솚솛솜솝솞솟솠송솢솣솤솥솦솧솨솩솪솫솬솭솮솯솰솱솲솳솴솵솶솷솸솹솺솻솼솽솾솿쇀쇁쇂쇃쇄쇅쇆쇇쇈쇉쇊쇋쇌쇍쇎쇏쇐쇑쇒쇓쇔쇕쇖쇗쇘쇙쇚쇛쇜쇝쇞쇟쇠쇡쇢쇣쇤쇥쇦쇧쇨쇩쇪쇫쇬쇭쇮쇯쇰쇱쇲쇳쇴쇵쇶쇷쇸쇹쇺쇻쇼쇽쇾쇿숀숁숂숃숄숅숆숇숈숉숊숋숌숍숎숏숐숑숒숓숔숕숖숗수숙숚숛순숝숞숟술숡숢숣숤숥숦숧숨숩숪숫숬숭숮숯숰숱숲숳숴숵숶숷숸숹숺숻숼숽숾숿쉀쉁쉂쉃쉄쉅쉆쉇쉈쉉쉊쉋쉌쉍쉎쉏쉐쉑쉒쉓쉔쉕쉖쉗쉘쉙쉚쉛쉜쉝쉞쉟쉠쉡쉢쉣쉤쉥쉦쉧쉨쉩쉪쉫쉬쉭쉮쉯쉰쉱쉲쉳쉴쉵쉶쉷쉸쉹쉺쉻쉼쉽쉾쉿슀슁슂슃슄슅슆슇슈슉슊슋슌슍슎슏슐슑슒슓슔슕슖슗슘슙슚슛슜슝슞슟슠슡슢슣스슥슦슧슨슩슪슫슬슭슮슯슰슱슲슳슴습슶슷슸승슺슻슼슽슾슿싀싁싂싃싄싅싆싇싈싉싊싋싌싍싎싏싐싑싒싓싔싕싖싗싘싙싚싛시식싞싟신싡싢싣실싥싦싧싨싩싪싫심십싮싯싰싱싲싳싴싵싶싷싸싹싺싻싼싽싾싿쌀쌁쌂쌃쌄쌅쌆쌇쌈쌉쌊쌋쌌쌍쌎쌏쌐쌑쌒쌓쌔쌕쌖쌗쌘쌙쌚쌛쌜쌝쌞쌟쌠쌡쌢쌣쌤쌥쌦쌧쌨쌩쌪쌫쌬쌭쌮쌯쌰쌱쌲쌳쌴쌵쌶쌷쌸쌹쌺쌻쌼쌽쌾쌿썀썁썂썃썄썅썆썇썈썉썊썋썌썍썎썏썐썑썒썓썔썕썖썗썘썙썚썛썜썝썞썟썠썡썢썣썤썥썦썧써썩썪썫썬썭썮썯썰썱썲썳썴썵썶썷썸썹썺썻썼썽썾썿쎀쎁쎂쎃쎄쎅쎆쎇쎈쎉쎊쎋쎌쎍쎎쎏쎐쎑쎒쎓쎔쎕쎖쎗쎘쎙쎚쎛쎜쎝쎞쎟쎠쎡쎢쎣쎤쎥쎦쎧쎨쎩쎪쎫쎬쎭쎮쎯쎰쎱쎲쎳쎴쎵쎶쎷쎸쎹쎺쎻쎼쎽쎾쎿쏀쏁쏂쏃쏄쏅쏆쏇쏈쏉쏊쏋쏌쏍쏎쏏쏐쏑쏒쏓쏔쏕쏖쏗쏘쏙쏚쏛쏜쏝쏞쏟쏠쏡쏢쏣쏤쏥쏦쏧쏨쏩쏪쏫쏬쏭쏮쏯쏰쏱쏲쏳쏴쏵쏶쏷쏸쏹쏺쏻쏼쏽쏾쏿쐀쐁쐂쐃쐄쐅쐆쐇쐈쐉쐊쐋쐌쐍쐎쐏쐐쐑쐒쐓쐔쐕쐖쐗쐘쐙쐚쐛쐜쐝쐞쐟쐠쐡쐢쐣쐤쐥쐦쐧쐨쐩쐪쐫쐬쐭쐮쐯쐰쐱쐲쐳쐴쐵쐶쐷쐸쐹쐺쐻쐼쐽쐾쐿쑀쑁쑂쑃쑄쑅쑆쑇쑈쑉쑊쑋쑌쑍쑎쑏쑐쑑쑒쑓쑔쑕쑖쑗쑘쑙쑚쑛쑜쑝쑞쑟쑠쑡쑢쑣쑤쑥쑦쑧쑨쑩쑪쑫쑬쑭쑮쑯쑰쑱쑲쑳쑴쑵쑶쑷쑸쑹쑺쑻쑼쑽쑾쑿쒀쒁쒂쒃쒄쒅쒆쒇쒈쒉쒊쒋쒌쒍쒎쒏쒐쒑쒒쒓쒔쒕쒖쒗쒘쒙쒚쒛쒜쒝쒞쒟쒠쒡쒢쒣쒤쒥쒦쒧쒨쒩쒪쒫쒬쒭쒮쒯쒰쒱쒲쒳쒴쒵쒶쒷쒸쒹쒺쒻쒼쒽쒾쒿쓀쓁쓂쓃쓄쓅쓆쓇쓈쓉쓊쓋쓌쓍쓎쓏쓐쓑쓒쓓쓔쓕쓖쓗쓘쓙쓚쓛쓜쓝쓞쓟쓠쓡쓢쓣쓤쓥쓦쓧쓨쓩쓪쓫쓬쓭쓮쓯쓰쓱쓲쓳쓴쓵쓶쓷쓸쓹쓺쓻쓼쓽쓾쓿씀씁씂씃씄씅씆씇씈씉씊씋씌씍씎씏씐씑씒씓씔씕씖씗씘씙씚씛씜씝씞씟씠씡씢씣씤씥씦씧씨씩씪씫씬씭씮씯씰씱씲씳씴씵씶씷씸씹씺씻씼씽씾씿앀앁앂앃아악앆앇안앉않앋알앍앎앏앐앑앒앓암압앖앗았앙앚앛앜앝앞앟애액앢앣앤앥앦앧앨앩앪앫앬앭앮앯앰앱앲앳앴앵앶앷앸앹앺앻야약앾앿얀얁얂얃얄얅얆얇얈얉얊얋얌얍얎얏얐양얒얓얔얕얖얗얘얙얚얛얜얝얞얟얠얡얢얣얤얥얦얧얨얩얪얫얬얭얮얯얰얱얲얳어억얶얷언얹얺얻얼얽얾얿엀엁엂엃엄업없엇었엉엊엋엌엍엎엏에엑엒엓엔엕엖엗엘엙엚엛엜엝엞엟엠엡엢엣엤엥엦엧엨엩엪엫여역엮엯연엱엲엳열엵엶엷엸엹엺엻염엽엾엿였영옂옃옄옅옆옇예옉옊옋옌옍옎옏옐옑옒옓옔옕옖옗옘옙옚옛옜옝옞옟옠옡옢옣오옥옦옧온옩옪옫올옭옮옯옰옱옲옳옴옵옶옷옸옹옺옻옼옽옾옿와왁왂왃완왅왆왇왈왉왊왋왌왍왎왏왐왑왒왓왔왕왖왗왘왙왚왛왜왝왞왟왠왡왢왣왤왥왦왧왨왩왪왫왬왭왮왯왰왱왲왳왴왵왶왷외왹왺왻왼왽왾왿욀욁욂욃욄욅욆욇욈욉욊욋욌욍욎욏욐욑욒욓요욕욖욗욘욙욚욛욜욝욞욟욠욡욢욣욤욥욦욧욨용욪욫욬욭욮욯우욱욲욳운욵욶욷울욹욺욻욼욽욾욿움웁웂웃웄웅웆웇웈웉웊웋워웍웎웏원웑웒웓월웕웖웗웘웙웚웛웜웝웞웟웠웡웢웣웤웥웦웧웨웩웪웫웬웭웮웯웰웱웲웳웴웵웶웷웸웹웺웻웼웽웾웿윀윁윂윃위윅윆윇윈윉윊윋윌윍윎윏윐윑윒윓윔윕윖윗윘윙윚윛윜윝윞윟유육윢윣윤윥윦윧율윩윪윫윬윭윮윯윰윱윲윳윴융윶윷윸윹윺윻으윽윾윿은읁읂읃을읅읆읇읈읉읊읋음읍읎읏읐응읒읓읔읕읖읗의읙읚읛읜읝읞읟읠읡읢읣읤읥읦읧읨읩읪읫읬읭읮읯읰읱읲읳이익읶읷인읹읺읻일읽읾읿잀잁잂
잃임입잆잇있잉잊잋잌잍잎잏자작잒잓잔잕잖잗잘잙잚잛잜잝잞잟잠잡잢잣잤장잦잧잨잩잪잫재잭잮잯잰잱잲잳잴잵잶잷잸잹잺잻잼잽잾잿쟀쟁쟂쟃쟄쟅쟆쟇쟈쟉쟊쟋쟌쟍쟎쟏쟐쟑쟒쟓쟔쟕쟖쟗쟘쟙쟚쟛쟜쟝쟞쟟쟠쟡쟢쟣쟤쟥쟦쟧쟨쟩쟪쟫쟬쟭쟮쟯쟰쟱쟲쟳쟴쟵쟶쟷쟸쟹쟺쟻쟼쟽쟾쟿저적젂젃전젅젆젇절젉젊젋젌젍젎젏점접젒젓젔정젖젗젘젙젚젛제젝젞젟젠젡젢젣젤젥젦젧젨젩젪젫젬젭젮젯젰젱젲젳젴젵젶젷져젹젺젻젼젽젾젿졀졁졂졃졄졅졆졇졈졉졊졋졌졍졎졏졐졑졒졓졔졕졖졗졘졙졚졛졜졝졞졟졠졡졢졣졤졥졦졧졨졩졪졫졬졭졮졯조족졲졳존졵졶졷졸졹졺졻졼졽졾졿좀좁좂좃좄종좆좇좈좉좊좋좌좍좎좏좐좑좒좓좔좕좖좗좘좙좚좛좜좝좞좟좠좡좢좣좤좥좦좧좨좩좪좫좬좭좮좯좰좱좲좳좴좵좶좷좸좹좺좻좼좽좾좿죀죁죂죃죄죅죆죇죈죉죊죋죌죍죎죏죐죑죒죓죔죕죖죗죘죙죚죛죜죝죞죟죠죡죢죣죤죥죦죧죨죩죪죫죬죭죮죯죰죱죲죳죴죵죶죷죸죹죺죻주죽죾죿준줁줂줃줄줅줆줇줈줉줊줋줌줍줎줏줐중줒줓줔줕줖줗줘줙줚줛줜줝줞줟줠줡줢줣줤줥줦줧줨줩줪줫줬줭줮줯줰줱줲줳줴줵줶줷줸줹줺줻줼줽줾줿쥀쥁쥂쥃쥄쥅쥆쥇쥈쥉쥊쥋쥌쥍쥎쥏쥐쥑쥒쥓쥔쥕쥖쥗쥘쥙쥚쥛쥜쥝쥞쥟쥠쥡쥢쥣쥤쥥쥦쥧쥨쥩쥪쥫쥬쥭쥮쥯쥰쥱쥲쥳쥴쥵쥶쥷쥸쥹쥺쥻쥼쥽쥾쥿즀즁즂즃즄즅즆즇즈즉즊즋즌즍즎즏즐즑즒즓즔즕즖즗즘즙즚즛즜증즞즟즠즡즢즣즤즥즦즧즨즩즪즫즬즭즮즯즰즱즲즳즴즵즶즷즸즹즺즻즼즽즾즿지직짂짃진짅짆짇질짉짊짋짌짍짎짏짐집짒짓짔징짖짗짘짙짚짛짜짝짞짟짠짡짢짣짤짥짦짧짨짩짪짫짬짭짮짯짰짱짲짳짴짵짶짷째짹짺짻짼짽짾짿쨀쨁쨂쨃쨄쨅쨆쨇쨈쨉쨊쨋쨌쨍쨎쨏쨐쨑쨒쨓쨔쨕쨖쨗쨘쨙쨚쨛쨜쨝쨞쨟쨠쨡쨢쨣쨤쨥쨦쨧쨨쨩쨪쨫쨬쨭쨮쨯쨰쨱쨲쨳쨴쨵쨶쨷쨸쨹쨺쨻쨼쨽쨾쨿쩀쩁쩂쩃쩄쩅쩆쩇쩈쩉쩊쩋쩌쩍쩎쩏쩐쩑쩒쩓쩔쩕쩖쩗쩘쩙쩚쩛쩜쩝쩞쩟쩠쩡쩢쩣쩤쩥쩦쩧쩨쩩쩪쩫쩬쩭쩮쩯쩰쩱쩲쩳쩴쩵쩶쩷쩸쩹쩺쩻쩼쩽쩾쩿쪀쪁쪂쪃쪄쪅쪆쪇쪈쪉쪊쪋쪌쪍쪎쪏쪐쪑쪒쪓쪔쪕쪖쪗쪘쪙쪚쪛쪜쪝쪞쪟쪠쪡쪢쪣쪤쪥쪦쪧쪨쪩쪪쪫쪬쪭쪮쪯쪰쪱쪲쪳쪴쪵쪶쪷쪸쪹쪺쪻쪼쪽쪾쪿쫀쫁쫂쫃쫄쫅쫆쫇쫈쫉쫊쫋쫌쫍쫎쫏쫐쫑쫒쫓쫔쫕쫖쫗쫘쫙쫚쫛쫜쫝쫞쫟쫠쫡쫢쫣쫤쫥쫦쫧쫨쫩쫪쫫쫬쫭쫮쫯쫰쫱쫲쫳쫴쫵쫶쫷쫸쫹쫺쫻쫼쫽쫾쫿쬀쬁쬂쬃쬄쬅쬆쬇쬈쬉쬊쬋쬌쬍쬎쬏쬐쬑쬒쬓쬔쬕쬖쬗쬘쬙쬚쬛쬜쬝쬞쬟쬠쬡쬢쬣쬤쬥쬦쬧쬨쬩쬪쬫쬬쬭쬮쬯쬰쬱쬲쬳쬴쬵쬶쬷쬸쬹쬺쬻쬼쬽쬾쬿쭀쭁쭂쭃쭄쭅쭆쭇쭈쭉쭊쭋쭌쭍쭎쭏쭐쭑쭒쭓쭔쭕쭖쭗쭘쭙쭚쭛쭜쭝쭞쭟쭠쭡쭢쭣쭤쭥쭦쭧쭨쭩쭪쭫쭬쭭쭮쭯쭰쭱쭲쭳쭴쭵쭶쭷쭸쭹쭺쭻쭼쭽쭾쭿쮀쮁쮂쮃쮄쮅쮆쮇쮈쮉쮊쮋쮌쮍쮎쮏쮐쮑쮒쮓쮔쮕쮖쮗쮘쮙쮚쮛쮜쮝쮞쮟쮠쮡쮢쮣쮤쮥쮦쮧쮨쮩쮪쮫쮬쮭쮮쮯쮰쮱쮲쮳쮴쮵쮶쮷쮸쮹쮺쮻쮼쮽쮾쮿쯀쯁쯂쯃쯄쯅쯆쯇쯈쯉쯊쯋쯌쯍쯎쯏쯐쯑쯒쯓쯔쯕쯖쯗쯘쯙쯚쯛쯜쯝쯞쯟쯠쯡쯢쯣쯤쯥쯦쯧쯨쯩쯪쯫쯬쯭쯮쯯쯰쯱쯲쯳쯴쯵쯶쯷쯸쯹쯺쯻쯼쯽쯾쯿찀찁찂찃찄찅찆찇찈찉찊찋찌찍찎찏찐찑찒찓찔찕찖찗찘찙찚찛찜찝찞찟찠찡찢찣찤찥찦찧차착찪찫찬찭찮찯찰찱찲찳찴찵찶찷참찹찺찻찼창찾찿챀챁챂챃채책챆챇챈챉챊챋챌챍챎챏챐챑챒챓챔챕챖챗챘챙챚챛챜챝챞챟챠챡챢챣챤챥챦챧챨챩챪챫챬챭챮챯챰챱챲챳챴챵챶챷챸챹챺챻챼챽챾챿첀첁첂첃첄첅첆첇첈첉첊첋첌첍첎첏첐첑첒첓첔첕첖첗처척첚첛천첝첞첟철첡첢첣첤첥첦첧첨첩첪첫첬청첮첯첰첱첲첳체첵첶첷첸첹첺첻첼첽첾첿쳀쳁쳂쳃쳄쳅쳆쳇쳈쳉쳊쳋쳌쳍쳎쳏쳐쳑쳒쳓쳔쳕쳖쳗쳘쳙쳚쳛쳜쳝쳞쳟쳠쳡쳢쳣쳤쳥쳦쳧쳨쳩쳪쳫쳬쳭쳮쳯쳰쳱쳲쳳쳴쳵쳶쳷쳸쳹쳺쳻쳼쳽쳾쳿촀촁촂촃촄촅촆촇초촉촊촋촌촍촎촏촐촑촒촓촔촕촖촗촘촙촚촛촜총촞촟촠촡촢촣촤촥촦촧촨촩촪촫촬촭촮촯촰촱촲촳촴촵촶촷촸촹촺촻촼촽촾촿쵀쵁쵂쵃쵄쵅쵆쵇쵈쵉쵊쵋쵌쵍쵎쵏쵐쵑쵒쵓쵔쵕쵖쵗쵘쵙쵚쵛최쵝쵞쵟쵠쵡쵢쵣쵤쵥쵦쵧쵨쵩쵪쵫쵬쵭쵮쵯쵰쵱쵲쵳쵴쵵쵶쵷쵸쵹쵺쵻쵼쵽쵾쵿춀춁춂춃춄춅춆춇춈춉춊춋춌춍춎춏춐춑춒춓추축춖춗춘춙춚춛출춝춞춟춠춡춢춣춤춥춦춧춨충춪춫춬춭춮춯춰춱춲춳춴춵춶춷춸춹춺춻춼춽춾춿췀췁췂췃췄췅췆췇췈췉췊췋췌췍췎췏췐췑췒췓췔췕췖췗췘췙췚췛췜췝췞췟췠췡췢췣췤췥췦췧취췩췪췫췬췭췮췯췰췱췲췳췴췵췶췷췸췹췺췻췼췽췾췿츀츁츂츃츄츅츆츇츈츉츊츋츌츍츎츏츐츑츒츓츔츕츖츗츘츙츚츛츜츝츞츟츠측츢츣츤츥츦츧츨츩츪츫츬츭츮츯츰츱츲츳츴층츶츷츸츹츺츻츼츽츾츿칀칁칂칃칄칅칆칇칈칉칊칋칌칍칎칏칐칑칒칓칔칕칖칗치칙칚칛친칝칞칟칠칡칢칣칤칥칦칧침칩칪칫칬칭칮칯칰칱칲칳카칵칶칷칸칹칺칻칼칽칾칿캀캁캂캃캄캅캆캇캈캉캊캋캌캍캎캏캐캑캒캓캔캕캖캗캘캙캚캛캜캝캞캟캠캡캢캣캤캥캦캧캨캩캪캫캬캭캮캯캰캱캲캳캴캵캶캷캸캹캺캻캼캽캾캿컀컁컂컃컄컅컆컇컈컉컊컋컌컍컎컏컐컑컒컓컔컕컖컗컘컙컚컛컜컝컞컟컠컡컢컣커컥컦컧컨컩컪컫컬컭컮컯컰컱컲컳컴컵컶컷컸컹컺컻컼컽컾컿케켁켂켃켄켅켆켇켈켉켊켋켌켍켎켏켐켑켒켓켔켕켖켗켘켙켚켛켜켝켞켟켠켡켢켣켤켥켦켧켨켩켪켫켬켭켮켯켰켱켲켳켴켵켶켷켸켹켺켻켼켽켾켿콀콁콂콃콄콅콆콇콈콉콊콋콌콍콎콏콐콑콒콓코콕콖콗콘콙콚콛콜콝콞콟콠콡콢콣콤콥콦콧콨콩콪콫콬콭콮콯콰콱콲콳콴콵콶콷콸콹콺콻콼콽콾콿쾀쾁쾂쾃쾄쾅쾆쾇쾈쾉쾊쾋쾌쾍쾎쾏쾐쾑쾒쾓쾔쾕쾖쾗쾘쾙쾚쾛쾜쾝쾞쾟쾠쾡쾢쾣쾤쾥쾦쾧쾨쾩쾪쾫쾬쾭쾮쾯쾰쾱쾲쾳쾴쾵쾶쾷쾸쾹쾺쾻쾼쾽쾾쾿쿀쿁쿂쿃쿄쿅쿆쿇쿈쿉쿊쿋쿌쿍쿎쿏쿐쿑쿒쿓쿔쿕쿖쿗쿘쿙쿚쿛쿜쿝쿞쿟쿠쿡쿢쿣쿤쿥쿦쿧쿨쿩쿪쿫쿬쿭쿮쿯쿰쿱쿲쿳쿴쿵쿶쿷쿸쿹쿺쿻쿼쿽쿾쿿퀀퀁퀂퀃퀄퀅퀆퀇퀈퀉퀊퀋퀌퀍퀎퀏퀐퀑퀒퀓퀔퀕퀖퀗퀘퀙퀚퀛퀜퀝퀞퀟퀠퀡퀢퀣퀤퀥퀦퀧퀨퀩퀪퀫퀬퀭퀮퀯퀰퀱퀲퀳퀴퀵퀶퀷퀸퀹퀺퀻퀼퀽퀾퀿큀큁큂큃큄큅큆큇큈큉큊큋큌큍큎큏큐큑큒큓큔큕큖큗큘큙큚큛큜큝큞큟큠큡큢큣큤큥큦큧큨큩큪큫크큭큮큯큰큱큲큳클큵큶큷큸큹큺큻큼큽큾큿킀킁킂킃킄킅킆킇킈킉킊킋킌킍킎킏킐킑킒킓킔킕킖킗킘킙킚킛킜킝킞킟킠킡킢킣키킥킦킧킨킩킪킫킬킭킮킯킰킱킲킳킴킵킶킷킸킹킺킻킼킽킾킿타탁탂탃탄탅탆탇탈탉탊탋탌탍탎탏탐탑탒탓탔탕탖탗탘탙탚탛태택탞탟탠탡탢탣탤탥탦탧탨탩탪탫탬탭탮탯탰탱탲탳탴탵탶탷탸탹탺탻탼탽탾탿턀턁턂턃턄턅턆턇턈턉턊턋턌턍턎턏턐턑턒턓턔턕턖턗턘턙턚턛턜턝턞턟턠턡턢턣턤턥턦턧턨턩턪턫턬턭턮턯터턱턲턳턴턵턶턷털턹턺턻턼턽턾턿텀텁텂텃텄텅텆텇텈텉텊텋테텍텎텏텐텑텒텓텔텕텖텗텘텙텚텛템텝텞텟텠텡텢텣텤텥텦텧텨텩텪텫텬텭텮텯텰텱텲텳텴텵텶텷텸텹텺텻텼텽텾텿톀톁톂톃톄톅톆톇톈톉톊톋톌톍톎톏톐톑톒톓톔톕톖톗톘톙톚톛톜톝톞톟토톡톢톣톤톥톦톧톨톩톪톫톬톭톮톯톰톱톲톳톴통톶톷톸톹톺톻톼톽톾톿퇀퇁퇂퇃퇄퇅퇆퇇퇈퇉퇊퇋퇌퇍퇎퇏퇐퇑퇒퇓퇔퇕퇖퇗퇘퇙퇚퇛퇜퇝퇞퇟퇠퇡퇢퇣퇤퇥퇦퇧퇨퇩퇪퇫퇬퇭퇮퇯퇰퇱퇲퇳퇴퇵퇶퇷퇸퇹퇺퇻퇼퇽퇾퇿툀툁툂툃툄툅툆툇툈툉툊툋툌툍툎툏툐툑툒툓툔툕툖툗툘툙툚툛툜툝툞툟툠툡툢툣툤툥툦툧툨툩툪툫투툭툮툯툰툱툲툳툴툵툶툷툸툹툺툻툼툽툾툿퉀퉁퉂퉃퉄퉅퉆퉇퉈퉉퉊퉋퉌퉍퉎퉏퉐퉑퉒퉓퉔퉕퉖퉗퉘퉙퉚퉛퉜퉝퉞퉟퉠퉡퉢퉣퉤퉥퉦퉧퉨퉩퉪퉫퉬퉭퉮퉯퉰퉱퉲퉳퉴퉵퉶퉷퉸퉹퉺퉻퉼퉽퉾퉿튀튁튂튃튄튅튆튇튈튉튊튋튌튍튎튏튐튑튒튓튔튕튖튗튘튙튚튛튜튝튞튟튠튡튢튣튤튥튦튧튨튩튪튫튬튭튮튯튰튱튲튳튴튵튶튷트특튺튻튼튽튾튿틀틁틂틃틄틅틆틇틈틉틊틋틌틍틎틏틐틑틒틓틔틕틖틗틘틙틚틛틜틝틞틟틠틡틢틣틤틥틦틧틨틩틪틫틬틭틮틯티틱틲틳틴틵틶틷틸틹틺틻틼틽틾틿팀팁팂팃팄팅팆팇팈팉팊팋파팍팎팏판팑팒팓팔팕팖팗팘팙팚팛팜팝팞팟팠팡팢팣팤팥팦팧패팩팪팫팬팭팮팯팰팱팲팳팴팵팶팷팸팹팺팻팼팽팾팿퍀퍁퍂퍃퍄퍅퍆퍇퍈퍉퍊퍋퍌퍍퍎퍏퍐퍑퍒퍓퍔퍕퍖퍗퍘퍙퍚퍛퍜퍝퍞퍟퍠퍡퍢퍣퍤퍥퍦퍧퍨퍩퍪퍫퍬퍭퍮퍯퍰퍱퍲퍳퍴퍵퍶퍷퍸퍹퍺퍻퍼퍽퍾퍿펀펁펂펃펄펅펆펇펈펉펊펋펌펍펎펏펐펑펒펓펔펕펖펗페펙펚펛펜펝펞펟펠펡펢펣펤펥펦펧펨펩펪펫펬펭펮펯펰펱펲펳펴펵펶펷편펹펺펻펼펽펾펿폀폁폂폃폄폅폆폇폈평폊폋폌폍폎폏폐폑폒폓폔폕폖폗폘폙폚폛폜폝폞폟폠폡폢폣폤폥폦폧폨폩폪폫포폭폮폯폰폱폲폳폴폵폶폷폸폹폺폻폼폽폾폿퐀퐁퐂퐃퐄퐅퐆퐇퐈퐉퐊퐋퐌퐍퐎퐏퐐퐑퐒퐓퐔퐕퐖퐗퐘퐙퐚퐛퐜퐝퐞퐟퐠퐡퐢퐣퐤퐥퐦퐧퐨퐩퐪퐫퐬퐭퐮퐯퐰퐱퐲퐳퐴퐵퐶퐷퐸퐹퐺퐻퐼퐽퐾퐿푀푁푂푃푄푅푆푇푈푉푊푋푌푍푎푏푐푑푒푓푔푕푖푗푘푙푚푛표푝푞푟푠푡푢푣푤푥푦푧푨푩푪푫푬푭푮푯푰푱푲푳푴푵푶푷푸푹푺푻푼푽푾푿풀풁풂풃풄풅풆풇품풉풊풋풌풍풎풏풐풑풒풓풔풕풖풗풘풙풚풛풜풝풞풟풠풡풢풣풤풥풦풧풨풩풪풫풬풭풮풯풰풱풲풳풴풵풶풷풸풹풺풻풼풽풾풿퓀퓁퓂퓃퓄퓅퓆퓇퓈퓉퓊퓋퓌퓍퓎퓏퓐퓑퓒퓓퓔퓕퓖퓗퓘퓙퓚퓛퓜퓝퓞퓟퓠퓡퓢퓣퓤퓥퓦퓧퓨퓩퓪퓫퓬퓭퓮퓯퓰퓱퓲퓳퓴퓵퓶퓷퓸퓹퓺퓻퓼퓽퓾퓿픀픁픂픃프픅픆픇픈픉픊픋플픍픎픏픐픑픒픓픔픕픖픗픘픙픚픛픜픝픞픟픠픡픢픣픤픥픦픧픨픩픪픫픬픭픮픯픰픱픲픳픴픵픶픷픸픹픺픻피픽픾픿핀핁핂핃필핅핆핇핈핉핊핋핌핍핎핏핐핑핒핓핔핕핖핗하학핚핛한핝핞핟할핡핢핣핤핥
핦핧함합핪핫핬항핮핯핰핱핲핳해핵핶핷핸핹핺핻핼핽핾핿햀햁햂햃햄햅햆햇했행햊햋햌햍햎햏햐햑햒햓햔햕햖햗햘햙햚햛햜햝햞햟햠햡햢햣햤향햦햧햨햩햪햫햬햭햮햯햰햱햲햳햴햵햶햷햸햹햺햻햼햽햾햿헀헁헂헃헄헅헆헇허헉헊헋헌헍헎헏헐헑헒헓헔헕헖헗험헙헚헛헜헝헞헟헠헡헢헣헤헥헦헧헨헩헪헫헬헭헮헯헰헱헲헳헴헵헶헷헸헹헺헻헼헽헾헿혀혁혂혃현혅혆혇혈혉혊혋혌혍혎혏혐협혒혓혔형혖혗혘혙혚혛혜혝혞혟혠혡혢혣혤혥혦혧혨혩혪혫혬혭혮혯혰혱혲혳혴혵혶혷호혹혺혻혼혽혾혿홀홁홂홃홄홅홆홇홈홉홊홋홌홍홎홏홐홑홒홓화확홖홗환홙홚홛활홝홞홟홠홡홢홣홤홥홦홧홨황홪홫홬홭홮홯홰홱홲홳홴홵홶홷홸홹홺홻홼홽홾홿횀횁횂횃횄횅횆횇횈횉횊횋회획횎횏횐횑횒횓횔횕횖횗횘횙횚횛횜횝횞횟횠횡횢횣횤횥횦횧효횩횪횫횬횭횮횯횰횱횲횳횴횵횶횷횸횹횺횻횼횽횾횿훀훁훂훃후훅훆훇훈훉훊훋훌훍훎훏훐훑훒훓훔훕훖훗훘훙훚훛훜훝훞훟훠훡훢훣훤훥훦훧훨훩훪훫훬훭훮훯훰훱훲훳훴훵훶훷훸훹훺훻훼훽훾훿휀휁휂휃휄휅휆휇휈휉휊휋휌휍휎휏휐휑휒휓휔휕휖휗휘휙휚휛휜휝휞휟휠휡휢휣휤휥휦휧휨휩휪휫휬휭휮휯휰휱휲휳휴휵휶휷휸휹휺휻휼휽휾휿흀흁흂흃흄흅흆흇흈흉흊흋흌흍흎흏흐흑흒흓흔흕흖흗흘흙흚흛흜흝흞흟흠흡흢흣흤흥흦흧흨흩흪흫희흭흮흯흰흱흲흳흴흵흶흷흸흹흺흻흼흽흾흿힀힁힂힃힄힅힆힇히힉힊힋힌힍힎힏힐힑힒힓힔힕힖힗힘힙힚힛힜힝힞힟힠힡힢힣" - - -class UnqualifiedFontException(Exception): - def __init__(self, font: DSFont): - super().__init__(f"Unqualified font: {font.path}") - self.font = font - - -def random_char(length: int, font: DSFont, char_set: str) -> str: - assert length > 0 - assert len(char_set) > 0 - - ret = "" - fail_cnt = 0 - while len(ret) < length: - char = char_set[random.randint(0, len(char_set) - 1)] - if char_in_font(char, font.path): - ret += char - else: - fail_cnt += 1 - print(f"FAILING {fail_cnt} for {font.path}") - if fail_cnt > 2000: - raise UnqualifiedFontException(font) - - return ret - - -class CorpusGenerationConfig(object): - def __init__( - self, - min_num_line: int, - max_num_line: int, - min_num_char_per_line: int, - max_num_char_per_line: int, - ): - self.min_num_line = min_num_line - self.max_num_line = max_num_line - self.min_num_char_per_line = min_num_char_per_line - self.max_num_char_per_line = max_num_char_per_line - - -class CommonCorpusGenerator(object): - def generate_line(self, length: int, font: DSFont) -> str: - _, _ = length, font - pass - - def generate(self, config: CorpusGenerationConfig, font: DSFont) -> str: - num_lines = random.randint(config.min_num_line, config.max_num_line) - lines = [] - - for _ in range(num_lines): - num_chars = random.randint( - config.min_num_char_per_line, config.max_num_char_per_line - ) - lines.append(self.generate_line(num_chars, font)) - - return "\n".join(lines) - - -class JapaneseUtaNetCorpusGenerator(CommonCorpusGenerator): - def _corpus_generator(self): - import sqlite3 - - self.conn = sqlite3.connect("lyrics_corpus/cache/uta-net.db") - self.cur = self.conn.cursor() - - while True: - self.cur.execute( - "SELECT lyrics FROM lyrics WHERE song_id IN (SELECT song_id FROM lyrics ORDER BY RANDOM() LIMIT 1)" - ) - row = self.cur.fetchone() - if row is not None: - row = str(row[0]) - for line in row.splitlines(): - if len(line) > 0: - yield line - continue - else: - return - - def _random_place_holder(self, font: DSFont) -> str: - r = random.randint(1, 3) - if r == 1: - ret = random_char(1, font, katakana) - elif r == 2: - ret = random_char(1, font, hiragana) - else: - ret = random_char(1, font, common_kanji) - return ret - - def __init__(self): - self.corpus_iterator = self._corpus_generator() - - def generate_line(self, length: int, font: DSFont) -> str: - while True: - try: - # get new line - line = next(self.corpus_iterator) - - # filter for font - ret_line = "" - for char in line: - if char_in_font(char, font.path): - ret_line += char - else: - ret_line += self._random_place_holder(font) - - # truncate or pad - if len(ret_line) >= length: - ret_line = ret_line[:length] - else: - for _ in range(length - len(ret_line)): - ret_line += self._random_place_holder(font) - - return ret_line - - except StopIteration: - self.corpus_iterator = self._corpus_generator() - - -class RandomCorpusGeneratorWithEnglish(CommonCorpusGenerator): - def __init__( - self, char_set: str, prob: float = 
0.3, when_length_greater_than: int = 10 - ): - if os.path.exists("wordlist.txt"): - with open("wordlist.txt", "r", encoding="utf-8") as f: - self.english_words = f.read().splitlines() - else: - word_site = "https://www.mit.edu/~ecprice/wordlist.10000" - response = requests.get(word_site) - self.english_words = response.text.splitlines() - self.char_set = char_set - self.prob = prob - self.when_length_greater_than = when_length_greater_than - - def generate_line(self, length: int, font: DSFont) -> str: - generate_corpus = random_char(length, font, self.char_set) - if length > self.when_length_greater_than: - if random.random() < self.prob: - random_english_word = random.choice(self.english_words) - if len(random_english_word) > length: - return random_english_word[:length] - start_place = random.randint(0, length - len(random_english_word)) - ret = ( - generate_corpus[:start_place] - + random_english_word - + generate_corpus[start_place + len(random_english_word) :] - ) - assert len(ret) == length - return ret - return generate_corpus - - -class SimplifiedChineseRandomCorpusGeneratorWithEnglish( - RandomCorpusGeneratorWithEnglish -): - def __init__(self, prob: float = 0.3, when_length_greater_than: int = 10): - super().__init__(common_simplified_chinese, prob, when_length_greater_than) - - -class TraditionalChineseRandomCorpusGeneratorWithEnglish( - RandomCorpusGeneratorWithEnglish -): - def __init__(self, prob: float = 0.3, when_length_greater_than: int = 10): - super().__init__(common_traditional_chinese, prob, when_length_greater_than) - - -class KoreanRandomCorpusGeneratorWithEnglish(RandomCorpusGeneratorWithEnglish): - def __init__(self, prob: float = 0.3, when_length_greater_than: int = 10): - super().__init__(korean_alphabet, prob, when_length_greater_than) - - -class CorpusGeneratorManager: - def __init__(self): - self.generators = { - "ja": JapaneseUtaNetCorpusGenerator(), - "zh-Hans": SimplifiedChineseRandomCorpusGeneratorWithEnglish(), - "zh-Hant": TraditionalChineseRandomCorpusGeneratorWithEnglish(), - "ko": KoreanRandomCorpusGeneratorWithEnglish(), - } - - def _get_generator( - self, font: DSFont, CJK_language: str = None - ) -> CommonCorpusGenerator: - langauge = CJK_language if CJK_language is not None else font.language - - for k, v in self.generators.items(): - if langauge.startswith(k): - return v - - raise Exception(f"no generator for {font.language}") - - def generate( - self, config: CorpusGenerationConfig, font: DSFont, CJK_language: str = None - ) -> str: - return self._get_generator(font, CJK_language).generate(config, font) diff --git a/spaces/gyugnsu/DragGan-Inversion/stylegan_human/insetgan.py b/spaces/gyugnsu/DragGan-Inversion/stylegan_human/insetgan.py deleted file mode 100644 index 722a6e3a6cc02af06faa2f825d1ddef2532bd2cc..0000000000000000000000000000000000000000 --- a/spaces/gyugnsu/DragGan-Inversion/stylegan_human/insetgan.py +++ /dev/null @@ -1,448 +0,0 @@ -# Copyright (c) SenseTime Research. All rights reserved. 
- - -import torch -import torch.nn.functional as F -from tqdm import tqdm -from lpips import LPIPS -import numpy as np -from torch_utils.models import Generator as bodyGAN -from torch_utils.models_face import Generator as FaceGAN -import dlib -from utils.face_alignment import align_face_for_insetgan -from utils.util import visual, tensor_to_numpy, numpy_to_tensor -import legacy -import os -import click - - -class InsetGAN(torch.nn.Module): - def __init__(self, stylebody_ckpt, styleface_ckpt): - super().__init__() - - # convert pkl to pth - if not os.path.exists(stylebody_ckpt.replace('.pkl', '.pth')): - legacy.convert( - stylebody_ckpt, stylebody_ckpt.replace('.pkl', '.pth')) - stylebody_ckpt = stylebody_ckpt.replace('.pkl', '.pth') - - if not os.path.exists(styleface_ckpt.replace('.pkl', '.pth')): - legacy.convert( - styleface_ckpt, styleface_ckpt.replace('.pkl', '.pth')) - styleface_ckpt = styleface_ckpt.replace('.pkl', '.pth') - - # dual generator - config = {"latent": 512, "n_mlp": 8, "channel_multiplier": 2} - self.body_generator = bodyGAN( - size=1024, - style_dim=config["latent"], - n_mlp=config["n_mlp"], - channel_multiplier=config["channel_multiplier"] - ) - self.body_generator.load_state_dict( - torch.load(stylebody_ckpt)['g_ema']) - self.body_generator.eval().requires_grad_(False).cuda() - - self.face_generator = FaceGAN( - size=1024, - style_dim=config["latent"], - n_mlp=config["n_mlp"], - channel_multiplier=config["channel_multiplier"] - ) - self.face_generator.load_state_dict( - torch.load(styleface_ckpt)['g_ema']) - self.face_generator.eval().requires_grad_(False).cuda() - # crop function - self.dlib_predictor = dlib.shape_predictor( - './pretrained_models/shape_predictor_68_face_landmarks.dat') - self.dlib_cnn_face_detector = dlib.cnn_face_detection_model_v1( - "pretrained_models/mmod_human_face_detector.dat") - - # criterion - self.lpips_loss = LPIPS(net='alex').cuda().eval() - self.l1_loss = torch.nn.L1Loss(reduction='mean') - - def loss_coarse(self, A_face, B, p1=500, p2=0.05): - A_face = F.interpolate(A_face, size=(64, 64), mode='area') - B = F.interpolate(B, size=(64, 64), mode='area') - loss_l1 = p1 * self.l1_loss(A_face, B) - loss_lpips = p2 * self.lpips_loss(A_face, B) - return loss_l1 + loss_lpips - - @staticmethod - def get_border_mask(A, x, spec): - mask = torch.zeros_like(A) - mask[:, :, :x, ] = 1 - mask[:, :, -x:, ] = 1 - mask[:, :, :, :x] = 1 - mask[:, :, :, -x:] = 1 - return mask - - @staticmethod - def get_body_mask(A, crop, padding=4): - mask = torch.ones_like(A) - mask[:, :, crop[1]-padding:crop[3]+padding, - crop[0]-padding:crop[2]+padding] = 0 - return mask - - def loss_border(self, A_face, B, p1=10000, p2=2, spec=None): - mask = self.get_border_mask(A_face, 8, spec) - loss_l1 = p1 * self.l1_loss(A_face*mask, B*mask) - loss_lpips = p2 * self.lpips_loss(A_face*mask, B*mask) - return loss_l1 + loss_lpips - - def loss_body(self, A, B, crop, p1=9000, p2=0.1): - padding = int((crop[3] - crop[1]) / 20) - mask = self.get_body_mask(A, crop, padding) - loss_l1 = p1 * self.l1_loss(A*mask, B*mask) - loss_lpips = p2 * self.lpips_loss(A*mask, B*mask) - return loss_l1+loss_lpips - - def loss_face(self, A, B, crop, p1=5000, p2=1.75): - mask = 1 - self.get_body_mask(A, crop) - loss_l1 = p1 * self.l1_loss(A*mask, B*mask) - loss_lpips = p2 * self.lpips_loss(A*mask, B*mask) - return loss_l1+loss_lpips - - def loss_reg(self, w, w_mean, p1, w_plus_delta=None, p2=None): - return p1 * torch.mean(((w - w_mean) ** 2)) + p2 * torch.mean(w_plus_delta ** 2) - - # FFHQ type - def 
detect_face_dlib(self, img): - # tensor to numpy array rgb uint8 - img = tensor_to_numpy(img) - aligned_image, crop, rect = align_face_for_insetgan(img=img, - detector=self.dlib_cnn_face_detector, - predictor=self.dlib_predictor, - output_size=256) - - aligned_image = np.array(aligned_image) - aligned_image = numpy_to_tensor(aligned_image) - return aligned_image, crop, rect - - # joint optimization - def dual_optimizer(self, - face_w, - body_w, - joint_steps=500, - face_initial_learning_rate=0.02, - body_initial_learning_rate=0.05, - lr_rampdown_length=0.25, - lr_rampup_length=0.05, - seed=None, - output_path=None, - video=0): - ''' - Given a face_w, optimize a body_w with suitable body pose & shape for face_w - ''' - def visual_(path, synth_body, synth_face, body_crop, step, both=False, init_body_with_face=None): - tmp = synth_body.clone().detach() - tmp[:, :, body_crop[1]:body_crop[3], - body_crop[0]:body_crop[2]] = synth_face - if both: - tmp = torch.cat([synth_body, tmp], dim=3) - save_path = os.path.join(path, f"{step:04d}.jpg") - visual(tmp, save_path) - - def forward(face_w_opt, - body_w_opt, - face_w_delta, - body_w_delta, - body_crop, - update_crop=False - ): - if face_w_opt.shape[1] != 18: - face_ws = (face_w_opt).repeat([1, 18, 1]) - else: - face_ws = face_w_opt.clone() - face_ws = face_ws + face_w_delta - synth_face, _ = self.face_generator( - [face_ws], input_is_latent=True, randomize_noise=False) - - body_ws = (body_w_opt).repeat([1, 18, 1]) - body_ws = body_ws + body_w_delta - synth_body, _ = self.body_generator( - [body_ws], input_is_latent=True, randomize_noise=False) - - if update_crop: - old_r = (body_crop[3]-body_crop[1] - ) // 2, (body_crop[2]-body_crop[0]) // 2 - _, body_crop, _ = self.detect_face_dlib(synth_body) - center = (body_crop[1] + body_crop[3] - ) // 2, (body_crop[0] + body_crop[2]) // 2 - body_crop = (center[1] - old_r[1], center[0] - old_r[0], - center[1] + old_r[1], center[0] + old_r[0]) - - synth_body_face = synth_body[:, :, body_crop[1]:body_crop[3], body_crop[0]:body_crop[2]] - - if synth_face.shape[2] > body_crop[3]-body_crop[1]: - synth_face_resize = F.interpolate(synth_face, size=( - body_crop[3]-body_crop[1], body_crop[2]-body_crop[0]), mode='area') - - return synth_body, synth_body_face, synth_face, synth_face_resize, body_crop - - def update_lr(init_lr, step, num_steps, lr_rampdown_length, lr_rampup_length): - t = step / num_steps - lr_ramp = min(1.0, (1.0 - t) / lr_rampdown_length) - lr_ramp = 0.5 - 0.5 * np.cos(lr_ramp * np.pi) - lr_ramp = lr_ramp * min(1.0, t / lr_rampup_length) - lr = init_lr * lr_ramp - return lr - - # update output_path - output_path = os.path.join(output_path, seed) - os.makedirs(output_path, exist_ok=True) - - # define optimized params - body_w_mean = self.body_generator.mean_latent(10000).detach() - face_w_opt = face_w.clone().detach().requires_grad_(True) - body_w_opt = body_w.clone().detach().requires_grad_(True) - face_w_delta = torch.zeros_like( - face_w.repeat([1, 18, 1])).requires_grad_(True) - body_w_delta = torch.zeros_like( - body_w.repeat([1, 18, 1])).requires_grad_(True) - # generate ref face & body - ref_body, _ = self.body_generator( - [body_w.repeat([1, 18, 1])], input_is_latent=True, randomize_noise=False) - # for inversion - ref_face, _ = self.face_generator( - [face_w.repeat([1, 18, 1])], input_is_latent=True, randomize_noise=False) - # get initilized crop - _, body_crop, _ = self.detect_face_dlib(ref_body) - # NOTE: this is face rect only. no FFHQ type. 
- _, _, face_crop = self.detect_face_dlib(ref_face) - # create optimizer - face_optimizer = torch.optim.Adam([face_w_opt, face_w_delta], betas=( - 0.9, 0.999), lr=face_initial_learning_rate) - body_optimizer = torch.optim.Adam([body_w_opt, body_w_delta], betas=( - 0.9, 0.999), lr=body_initial_learning_rate) - - global_step = 0 - # Stage1: remove background of face image - face_steps = 25 - pbar = tqdm(range(face_steps)) - for step in pbar: - face_lr = update_lr(face_initial_learning_rate / 2, step, - face_steps, lr_rampdown_length, lr_rampup_length) - for param_group in face_optimizer.param_groups: - param_group['lr'] = face_lr - synth_body, synth_body_face, synth_face_raw, synth_face, body_crop = forward(face_w_opt, - body_w_opt, - face_w_delta, - body_w_delta, - body_crop) - loss_face = self.loss_face( - synth_face_raw, ref_face, face_crop, 5000, 1.75) - loss_coarse = self.loss_coarse( - synth_face, synth_body_face, 50, 0.05) - loss_border = self.loss_border( - synth_face, synth_body_face, 1000, 0.1) - loss = loss_coarse + loss_border + loss_face - face_optimizer.zero_grad() - loss.backward() - face_optimizer.step() - # visualization - if video: - visual_(output_path, synth_body, - synth_face, body_crop, global_step) - pbar.set_description( - ( - f"face: {step:.4f}, lr: {face_lr}, loss: {loss.item():.2f}, loss_coarse: {loss_coarse.item():.2f};" - f"loss_border: {loss_border.item():.2f}, loss_face: {loss_face.item():.2f};" - ) - ) - global_step += 1 - - # Stage2: find a suitable body - body_steps = 150 - pbar = tqdm(range(body_steps)) - for step in pbar: - body_lr = update_lr(body_initial_learning_rate, step, - body_steps, lr_rampdown_length, lr_rampup_length) - update_crop = True if (step % 50 == 0) else False - # update_crop = False - for param_group in body_optimizer.param_groups: - param_group['lr'] = body_lr - synth_body, synth_body_face, synth_face_raw, synth_face, body_crop = forward(face_w_opt, - body_w_opt, - face_w_delta, - body_w_delta, - body_crop, - update_crop=update_crop) - loss_coarse = self.loss_coarse( - synth_face, synth_body_face, 500, 0.05) - loss_border = self.loss_border( - synth_face, synth_body_face, 2500, 0) - loss_body = self.loss_body( - synth_body, ref_body, body_crop, 9000, 0.1) - loss_reg = self.loss_reg( - body_w_opt, body_w_mean, 15000, body_w_delta, 0) - loss = loss_coarse + loss_border + loss_body + loss_reg - body_optimizer.zero_grad() - loss.backward() - body_optimizer.step() - - # visualization - if video: - visual_(output_path, synth_body, - synth_face, body_crop, global_step) - pbar.set_description( - ( - f"body: {step:.4f}, lr: {body_lr}, loss: {loss.item():.2f}, loss_coarse: {loss_coarse.item():.2f};" - f"loss_border: {loss_border.item():.2f}, loss_body: {loss_body.item():.2f}, loss_reg: {loss_reg:.2f}" - ) - ) - global_step += 1 - - # Stage3: joint optimization - interval = 50 - joint_face_steps = joint_steps // 2 - joint_body_steps = joint_steps // 2 - face_step = 0 - body_step = 0 - pbar = tqdm(range(joint_steps)) - flag = -1 - for step in pbar: - if step % interval == 0: - flag += 1 - text_flag = 'optimize_face' if flag % 2 == 0 else 'optimize_body' - synth_body, synth_body_face, synth_face_raw, synth_face, body_crop = forward(face_w_opt, - body_w_opt, - face_w_delta, - body_w_delta, - body_crop) - if text_flag == 'optimize_face': - face_lr = update_lr(face_initial_learning_rate, face_step, - joint_face_steps, lr_rampdown_length, lr_rampup_length) - for param_group in face_optimizer.param_groups: - param_group['lr'] = face_lr - loss_face = 
self.loss_face( - synth_face_raw, ref_face, face_crop, 5000, 1.75) - loss_coarse = self.loss_coarse( - synth_face, synth_body_face, 500, 0.05) - loss_border = self.loss_border( - synth_face, synth_body_face, 25000, 0) - loss = loss_coarse + loss_border + loss_face - face_optimizer.zero_grad() - loss.backward() - face_optimizer.step() - pbar.set_description( - ( - f"face: {step}, lr: {face_lr:.4f}, loss: {loss.item():.2f}, loss_coarse: {loss_coarse.item():.2f};" - f"loss_border: {loss_border.item():.2f}, loss_face: {loss_face.item():.2f};" - ) - ) - face_step += 1 - else: - body_lr = update_lr(body_initial_learning_rate, body_step, - joint_body_steps, lr_rampdown_length, lr_rampup_length) - for param_group in body_optimizer.param_groups: - param_group['lr'] = body_lr - loss_coarse = self.loss_coarse( - synth_face, synth_body_face, 500, 0.05) - loss_border = self.loss_border( - synth_face, synth_body_face, 2500, 0) - loss_body = self.loss_body( - synth_body, ref_body, body_crop, 9000, 0.1) - loss_reg = self.loss_reg( - body_w_opt, body_w_mean, 25000, body_w_delta, 0) - loss = loss_coarse + loss_border + loss_body + loss_reg - body_optimizer.zero_grad() - loss.backward() - body_optimizer.step() - pbar.set_description( - ( - f"body: {step}, lr: {body_lr:.4f}, loss: {loss.item():.2f}, loss_coarse: {loss_coarse.item():.2f};" - f"loss_border: {loss_border.item():.2f}, loss_body: {loss_body.item():.2f}, loss_reg: {loss_reg:.2f}" - ) - ) - body_step += 1 - if video: - visual_(output_path, synth_body, - synth_face, body_crop, global_step) - global_step += 1 - return face_w_opt.repeat([1, 18, 1])+face_w_delta, body_w_opt.repeat([1, 18, 1])+body_w_delta, body_crop - - -""" -Jointly combine and optimize generated faces and bodies . -Examples: - -\b -# Combine the generate human full-body image from the provided StyleGAN-Human pre-trained model -# and the generated face image from FFHQ model, optimize both latent codes to produce the coherent face-body image -python insetgan.py --body_network=pretrained_models/stylegan_human_v2_1024.pkl --face_network=pretrained_models/ffhq.pkl \\ - --body_seed=82 --face_seed=43 --trunc=0.6 --outdir=outputs/insetgan/ --video 1 -""" - - -@click.command() -@click.pass_context -@click.option('--face_network', default="./pretrained_models/ffhq.pkl", help='Network pickle filename', required=True) -@click.option('--body_network', default='./pretrained_models/stylegan2_1024.pkl', help='Network pickle filename', required=True) -@click.option('--face_seed', type=int, default=82, help='selected random seed') -@click.option('--body_seed', type=int, default=43, help='selected random seed') -@click.option('--joint_steps', type=int, default=500, help='num steps for joint optimization') -@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=0.6, show_default=True) -@click.option('--outdir', help='Where to save the output images', default="outputs/insetgan/", type=str, required=True, metavar='DIR') -@click.option('--video', help="set to 1 if want to save video", type=int, default=0) -def main( - ctx: click.Context, - face_network: str, - body_network: str, - face_seed: int, - body_seed: int, - joint_steps: int, - truncation_psi: float, - outdir: str, - video: int): - device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - insgan = InsetGAN(body_network, face_network) - os.makedirs(outdir, exist_ok=True) - face_z = np.random.RandomState(face_seed).randn(1, 512).astype(np.float32) - face_mean = insgan.face_generator.mean_latent(3000) - 
face_w = insgan.face_generator.get_latent( - torch.from_numpy(face_z).to(device)) # [N, L, C] - face_w = truncation_psi * face_w + (1-truncation_psi) * face_mean - face_img, _ = insgan.face_generator([face_w], input_is_latent=True) - - body_z = np.random.RandomState(body_seed).randn(1, 512).astype(np.float32) - body_mean = insgan.body_generator.mean_latent(3000) - body_w = insgan.body_generator.get_latent( - torch.from_numpy(body_z).to(device)) # [N, L, C] - body_w = truncation_psi * body_w + (1-truncation_psi) * body_mean - body_img, _ = insgan.body_generator([body_w], input_is_latent=True) - - _, body_crop, _ = insgan.detect_face_dlib(body_img) - face_img = F.interpolate(face_img, size=( - body_crop[3]-body_crop[1], body_crop[2]-body_crop[0]), mode='area') - cp_body = body_img.clone() - cp_body[:, :, body_crop[1]:body_crop[3], - body_crop[0]:body_crop[2]] = face_img - - optim_face_w, optim_body_w, crop = insgan.dual_optimizer( - face_w, - body_w, - joint_steps=joint_steps, - seed=f'{face_seed:04d}_{body_seed:04d}', - output_path=outdir, - video=video - ) - - if video: - ffmpeg_cmd = f"ffmpeg -hide_banner -loglevel error -i ./{outdir}/{face_seed:04d}_{body_seed:04d}/%04d.jpg -c:v libx264 -vf fps=30 -pix_fmt yuv420p ./{outdir}/{face_seed:04d}_{body_seed:04d}.mp4" - os.system(ffmpeg_cmd) - new_face_img, _ = insgan.face_generator( - [optim_face_w], input_is_latent=True) - new_shape = crop[3] - crop[1], crop[2] - crop[0] - new_face_img_crop = F.interpolate( - new_face_img, size=new_shape, mode='area') - seamless_body, _ = insgan.body_generator( - [optim_body_w], input_is_latent=True) - seamless_body[:, :, crop[1]:crop[3], crop[0]:crop[2]] = new_face_img_crop - temp = torch.cat([cp_body, seamless_body], dim=3) - visual(temp, f"{outdir}/{face_seed:04d}_{body_seed:04d}.png") - - -if __name__ == "__main__": - main() diff --git a/spaces/hamacojr/CAT-Seg/INSTALL.md b/spaces/hamacojr/CAT-Seg/INSTALL.md deleted file mode 100644 index 684c21171f6fc40b5febd995d45604643374c540..0000000000000000000000000000000000000000 --- a/spaces/hamacojr/CAT-Seg/INSTALL.md +++ /dev/null @@ -1,20 +0,0 @@ -## Installation - -### Requirements -- Linux or macOS with Python ≥ 3.6 -- PyTorch ≥ 1.7 and [torchvision](https://github.com/pytorch/vision/) that matches the PyTorch installation. - Install them together at [pytorch.org](https://pytorch.org) to make sure of this. Note, please check - PyTorch version matches that is required by Detectron2. -- Detectron2: follow [Detectron2 installation instructions](https://detectron2.readthedocs.io/tutorials/install.html). -- OpenCV is optional but needed by demo and visualization -- `pip install -r requirements.txt` - -An example of installation is shown below: - -``` -git clone https://github.com/~~~/CAT-Seg.git -cd CAT-Seg -conda create -n catseg python=3.8 -conda activate catseg -pip install -r requirements.txt -``` \ No newline at end of file diff --git a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/utils/registry.py b/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/utils/registry.py deleted file mode 100644 index fea1de961f0dbdacc934e11b9af5647b2a008051..0000000000000000000000000000000000000000 --- a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/utils/registry.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved - -# Keep this module for backward compatibility. -from fvcore.common.registry import Registry # noqa - -__all__ = ["Registry"] diff --git a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/networks/AugmentCE2P.py b/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/networks/AugmentCE2P.py deleted file mode 100644 index b5d2c7f88e51dbde32c551ba933647a137395147..0000000000000000000000000000000000000000 --- a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/networks/AugmentCE2P.py +++ /dev/null @@ -1,337 +0,0 @@ -#!/usr/bin/env python -# -*- encoding: utf-8 -*- - -""" -@Author : Peike Li -@Contact : peike.li@yahoo.com -@File : AugmentCE2P.py -@Time : 8/4/19 3:35 PM -@Desc : -@License : This source code is licensed under the license found in the - LICENSE file in the root directory of this source tree. -""" - -import functools - -import torch -import torch.nn as nn -from torch.nn import functional as F -# Note here we adopt the InplaceABNSync implementation from https://github.com/mapillary/inplace_abn -# By default, the InplaceABNSync module contains a BatchNorm Layer and a LeakyReLu layer -from modules import InPlaceABNSync - -BatchNorm2d = functools.partial(InPlaceABNSync, activation='none') - -affine_par = True - -pretrained_settings = { - 'resnet101': { - 'imagenet': { - 'input_space': 'BGR', - 'input_size': [3, 224, 224], - 'input_range': [0, 1], - 'mean': [0.406, 0.456, 0.485], - 'std': [0.225, 0.224, 0.229], - 'num_classes': 1000 - } - }, -} - - -def conv3x3(in_planes, out_planes, stride=1): - "3x3 convolution with padding" - return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, - padding=1, bias=False) - - -class Bottleneck(nn.Module): - expansion = 4 - - def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, fist_dilation=1, multi_grid=1): - super(Bottleneck, self).__init__() - self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) - self.bn1 = BatchNorm2d(planes) - self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, - padding=dilation * multi_grid, dilation=dilation * multi_grid, bias=False) - self.bn2 = BatchNorm2d(planes) - self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) - self.bn3 = BatchNorm2d(planes * 4) - self.relu = nn.ReLU(inplace=False) - self.relu_inplace = nn.ReLU(inplace=True) - self.downsample = downsample - self.dilation = dilation - self.stride = stride - - def forward(self, x): - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.relu(out) - - out = self.conv3(out) - out = self.bn3(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out = out + residual - out = self.relu_inplace(out) - - return out - - -class PSPModule(nn.Module): - """ - Reference: - Zhao, Hengshuang, et al. 
*"Pyramid scene parsing network."* - """ - - def __init__(self, features, out_features=512, sizes=(1, 2, 3, 6)): - super(PSPModule, self).__init__() - - self.stages = [] - self.stages = nn.ModuleList([self._make_stage(features, out_features, size) for size in sizes]) - self.bottleneck = nn.Sequential( - nn.Conv2d(features + len(sizes) * out_features, out_features, kernel_size=3, padding=1, dilation=1, - bias=False), - InPlaceABNSync(out_features), - ) - - def _make_stage(self, features, out_features, size): - prior = nn.AdaptiveAvgPool2d(output_size=(size, size)) - conv = nn.Conv2d(features, out_features, kernel_size=1, bias=False) - bn = InPlaceABNSync(out_features) - return nn.Sequential(prior, conv, bn) - - def forward(self, feats): - h, w = feats.size(2), feats.size(3) - priors = [F.interpolate(input=stage(feats), size=(h, w), mode='bilinear', align_corners=True) for stage in - self.stages] + [feats] - bottle = self.bottleneck(torch.cat(priors, 1)) - return bottle - - -class ASPPModule(nn.Module): - """ - Reference: - Chen, Liang-Chieh, et al. *"Rethinking Atrous Convolution for Semantic Image Segmentation."* - """ - - def __init__(self, features, inner_features=256, out_features=512, dilations=(12, 24, 36)): - super(ASPPModule, self).__init__() - - self.conv1 = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)), - nn.Conv2d(features, inner_features, kernel_size=1, padding=0, dilation=1, - bias=False), - InPlaceABNSync(inner_features)) - self.conv2 = nn.Sequential( - nn.Conv2d(features, inner_features, kernel_size=1, padding=0, dilation=1, bias=False), - InPlaceABNSync(inner_features)) - self.conv3 = nn.Sequential( - nn.Conv2d(features, inner_features, kernel_size=3, padding=dilations[0], dilation=dilations[0], bias=False), - InPlaceABNSync(inner_features)) - self.conv4 = nn.Sequential( - nn.Conv2d(features, inner_features, kernel_size=3, padding=dilations[1], dilation=dilations[1], bias=False), - InPlaceABNSync(inner_features)) - self.conv5 = nn.Sequential( - nn.Conv2d(features, inner_features, kernel_size=3, padding=dilations[2], dilation=dilations[2], bias=False), - InPlaceABNSync(inner_features)) - - self.bottleneck = nn.Sequential( - nn.Conv2d(inner_features * 5, out_features, kernel_size=1, padding=0, dilation=1, bias=False), - InPlaceABNSync(out_features), - nn.Dropout2d(0.1) - ) - - def forward(self, x): - _, _, h, w = x.size() - - feat1 = F.interpolate(self.conv1(x), size=(h, w), mode='bilinear', align_corners=True) - - feat2 = self.conv2(x) - feat3 = self.conv3(x) - feat4 = self.conv4(x) - feat5 = self.conv5(x) - out = torch.cat((feat1, feat2, feat3, feat4, feat5), 1) - - bottle = self.bottleneck(out) - return bottle - - -class Edge_Module(nn.Module): - """ - Edge Learning Branch - """ - - def __init__(self, in_fea=[256, 512, 1024], mid_fea=256, out_fea=2): - super(Edge_Module, self).__init__() - - self.conv1 = nn.Sequential( - nn.Conv2d(in_fea[0], mid_fea, kernel_size=1, padding=0, dilation=1, bias=False), - InPlaceABNSync(mid_fea) - ) - self.conv2 = nn.Sequential( - nn.Conv2d(in_fea[1], mid_fea, kernel_size=1, padding=0, dilation=1, bias=False), - InPlaceABNSync(mid_fea) - ) - self.conv3 = nn.Sequential( - nn.Conv2d(in_fea[2], mid_fea, kernel_size=1, padding=0, dilation=1, bias=False), - InPlaceABNSync(mid_fea) - ) - self.conv4 = nn.Conv2d(mid_fea, out_fea, kernel_size=3, padding=1, dilation=1, bias=True) - self.conv5 = nn.Conv2d(out_fea * 3, out_fea, kernel_size=1, padding=0, dilation=1, bias=True) - - def forward(self, x1, x2, x3): - _, _, h, w = x1.size() - - edge1_fea = 
self.conv1(x1) - edge1 = self.conv4(edge1_fea) - edge2_fea = self.conv2(x2) - edge2 = self.conv4(edge2_fea) - edge3_fea = self.conv3(x3) - edge3 = self.conv4(edge3_fea) - - edge2_fea = F.interpolate(edge2_fea, size=(h, w), mode='bilinear', align_corners=True) - edge3_fea = F.interpolate(edge3_fea, size=(h, w), mode='bilinear', align_corners=True) - edge2 = F.interpolate(edge2, size=(h, w), mode='bilinear', align_corners=True) - edge3 = F.interpolate(edge3, size=(h, w), mode='bilinear', align_corners=True) - - edge = torch.cat([edge1, edge2, edge3], dim=1) - edge_fea = torch.cat([edge1_fea, edge2_fea, edge3_fea], dim=1) - edge = self.conv5(edge) - - return edge, edge_fea - - -class Decoder_Module(nn.Module): - """ - Parsing Branch Decoder Module. - """ - - def __init__(self, num_classes): - super(Decoder_Module, self).__init__() - self.conv1 = nn.Sequential( - nn.Conv2d(512, 256, kernel_size=1, padding=0, dilation=1, bias=False), - InPlaceABNSync(256) - ) - self.conv2 = nn.Sequential( - nn.Conv2d(256, 48, kernel_size=1, stride=1, padding=0, dilation=1, bias=False), - InPlaceABNSync(48) - ) - self.conv3 = nn.Sequential( - nn.Conv2d(304, 256, kernel_size=1, padding=0, dilation=1, bias=False), - InPlaceABNSync(256), - nn.Conv2d(256, 256, kernel_size=1, padding=0, dilation=1, bias=False), - InPlaceABNSync(256) - ) - - self.conv4 = nn.Conv2d(256, num_classes, kernel_size=1, padding=0, dilation=1, bias=True) - - def forward(self, xt, xl): - _, _, h, w = xl.size() - xt = F.interpolate(self.conv1(xt), size=(h, w), mode='bilinear', align_corners=True) - xl = self.conv2(xl) - x = torch.cat([xt, xl], dim=1) - x = self.conv3(x) - seg = self.conv4(x) - return seg, x - - -class ResNet(nn.Module): - def __init__(self, block, layers, num_classes): - self.inplanes = 128 - super(ResNet, self).__init__() - self.conv1 = conv3x3(3, 64, stride=2) - self.bn1 = BatchNorm2d(64) - self.relu1 = nn.ReLU(inplace=False) - self.conv2 = conv3x3(64, 64) - self.bn2 = BatchNorm2d(64) - self.relu2 = nn.ReLU(inplace=False) - self.conv3 = conv3x3(64, 128) - self.bn3 = BatchNorm2d(128) - self.relu3 = nn.ReLU(inplace=False) - - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) - - self.layer1 = self._make_layer(block, 64, layers[0]) - self.layer2 = self._make_layer(block, 128, layers[1], stride=2) - self.layer3 = self._make_layer(block, 256, layers[2], stride=2) - self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=2, multi_grid=(1, 1, 1)) - - self.context_encoding = PSPModule(2048, 512) - - self.edge = Edge_Module() - self.decoder = Decoder_Module(num_classes) - - self.fushion = nn.Sequential( - nn.Conv2d(1024, 256, kernel_size=1, padding=0, dilation=1, bias=False), - InPlaceABNSync(256), - nn.Dropout2d(0.1), - nn.Conv2d(256, num_classes, kernel_size=1, padding=0, dilation=1, bias=True) - ) - - def _make_layer(self, block, planes, blocks, stride=1, dilation=1, multi_grid=1): - downsample = None - if stride != 1 or self.inplanes != planes * block.expansion: - downsample = nn.Sequential( - nn.Conv2d(self.inplanes, planes * block.expansion, - kernel_size=1, stride=stride, bias=False), - BatchNorm2d(planes * block.expansion, affine=affine_par)) - - layers = [] - generate_multi_grid = lambda index, grids: grids[index % len(grids)] if isinstance(grids, tuple) else 1 - layers.append(block(self.inplanes, planes, stride, dilation=dilation, downsample=downsample, - multi_grid=generate_multi_grid(0, multi_grid))) - self.inplanes = planes * block.expansion - for i in range(1, blocks): - layers.append( - 
block(self.inplanes, planes, dilation=dilation, multi_grid=generate_multi_grid(i, multi_grid))) - - return nn.Sequential(*layers) - - def forward(self, x): - x = self.relu1(self.bn1(self.conv1(x))) - x = self.relu2(self.bn2(self.conv2(x))) - x = self.relu3(self.bn3(self.conv3(x))) - x = self.maxpool(x) - x2 = self.layer1(x) - x3 = self.layer2(x2) - x4 = self.layer3(x3) - x5 = self.layer4(x4) - x = self.context_encoding(x5) - parsing_result, parsing_fea = self.decoder(x, x2) - # Edge Branch - edge_result, edge_fea = self.edge(x2, x3, x4) - # Fusion Branch - x = torch.cat([parsing_fea, edge_fea], dim=1) - fusion_result = self.fushion(x) - return [[parsing_result, fusion_result], [edge_result]] - - -def initialize_pretrained_model(model, settings, pretrained='./models/resnet101-imagenet.pth'): - model.input_space = settings['input_space'] - model.input_size = settings['input_size'] - model.input_range = settings['input_range'] - model.mean = settings['mean'] - model.std = settings['std'] - - if pretrained is not None: - saved_state_dict = torch.load(pretrained) - new_params = model.state_dict().copy() - for i in saved_state_dict: - i_parts = i.split('.') - if not i_parts[0] == 'fc': - new_params['.'.join(i_parts[0:])] = saved_state_dict[i] - model.load_state_dict(new_params) - - -def resnet101(num_classes=20, pretrained='./models/resnet101-imagenet.pth'): - model = ResNet(Bottleneck, [3, 4, 23, 3], num_classes) - settings = pretrained_settings['resnet101']['imagenet'] - initialize_pretrained_model(model, settings, pretrained) - return model diff --git a/spaces/hasibzunair/fifa-tryon-demo/rembg/session_simple.py b/spaces/hasibzunair/fifa-tryon-demo/rembg/session_simple.py deleted file mode 100644 index 7ec31813f2e14e80856803d2335671c9f50ca84f..0000000000000000000000000000000000000000 --- a/spaces/hasibzunair/fifa-tryon-demo/rembg/session_simple.py +++ /dev/null @@ -1,30 +0,0 @@ -from typing import List - -import numpy as np -from PIL import Image -from PIL.Image import Image as PILImage - -from .session_base import BaseSession - - -class SimpleSession(BaseSession): - def predict(self, img: PILImage) -> List[PILImage]: - ort_outs = self.inner_session.run( - None, - self.normalize( - img, (0.485, 0.456, 0.406), (0.229, 0.224, 0.225), (320, 320) - ), - ) - - pred = ort_outs[0][:, 0, :, :] - - ma = np.max(pred) - mi = np.min(pred) - - pred = (pred - mi) / (ma - mi) - pred = np.squeeze(pred) - - mask = Image.fromarray((pred * 255).astype("uint8"), mode="L") - mask = mask.resize(img.size, Image.LANCZOS) - - return [mask] diff --git a/spaces/hectorduran/wavescomparing/app.py b/spaces/hectorduran/wavescomparing/app.py deleted file mode 100644 index 523364b2cf67fe3c090bd779aac19187010fa467..0000000000000000000000000000000000000000 --- a/spaces/hectorduran/wavescomparing/app.py +++ /dev/null @@ -1,125 +0,0 @@ - -import streamlit as st -import pyttsx3 -from scipy.io import wavfile -from scipy import signal -from scipy.spatial import distance -import plotly.graph_objs as go -import numpy as np -from scipy.interpolate import interp2d - - -def similarity(file1, file2): - fs1, data1 = wavfile.read(file1) - fs2, data2 = wavfile.read(file2) - - f1, t1, Sxx1 = signal.spectrogram(data1, fs=fs1) - f2, t2, Sxx2 = signal.spectrogram(data2, fs=fs2) - - interp1 = interp2d(t1, f1, Sxx1) - interp2 = interp2d(t2, f2, Sxx2) - - tmin = max(t1.min(), t2.min()) - tmax = min(t1.max(), t2.max()) - fmin = max(f1.min(), f2.min()) - fmax = min(f1.max(), f2.max()) - - tnew = np.linspace(tmin, tmax, num=max(len(t1), len(t2))) - 
fnew = np.linspace(fmin, fmax, num=max(len(f1), len(f2))) - - Sxx1new = interp1(tnew, fnew) - Sxx2new = interp2(tnew, fnew) - - d = distance.euclidean(Sxx1new.flatten(), Sxx2new.flatten()) - max_d = max(Sxx1new.max(), Sxx2new.max()) * len(Sxx1new.flatten()) - - return 100 * (1 - d / max_d) - -st.title('Text to Sound Comparing') - -texto1 = st.text_input('First text','hi') -texto2 = st.text_input('Second text','five') - -intercambiar = st.selectbox('Interchange texts', [False, True]) - -if intercambiar: - texto1, texto2 = texto2, texto1 - -if texto1=="": - texto1="hi" - -if texto2=="": - texto2="five" - -if 1: - engine = pyttsx3.init() - engine.setProperty('voice', 'english') - engine.save_to_file(texto1, 'texto1.wav') - engine.save_to_file(texto2, 'texto2.wav') - engine.runAndWait() - - fs1, data1 = wavfile.read('texto1.wav') - fs2, data2 = wavfile.read('texto2.wav') - - inicio1 = st.slider('Start 1: '+str(texto1), 0, len(data1), 0) - inicio2 = st.slider('Start 2: '+str(texto2), 0, len(data2), 0) - - f1, t1, Sxx1 = signal.spectrogram(data1[inicio1:], fs=fs1) - f2, t2, Sxx2 = signal.spectrogram(data2[inicio2:], fs=fs2) - - fig1 = go.Figure() - fig1.add_trace(go.Scatter(y=data1[inicio1:], mode='lines', name='Text 1: '+str(texto1))) - fig1.add_trace(go.Scatter(y=data2[inicio2:], mode='lines', name='Text 2: '+str(texto2))) - st.header('Waves') - st.plotly_chart(fig1) - - fig2 = go.Figure() - fig2.add_trace(go.Heatmap(x=t1, y=f1, z=Sxx1, name='Spectrogram 1', xaxis='x2', yaxis='y2')) - st.header('Spectrogram '+str(texto1)) - st.plotly_chart(fig2) - - fig3 = go.Figure() - fig3.add_trace(go.Heatmap(x=t2, y=f2, z=Sxx2, name='Spectrogram 2', xaxis='x3', yaxis='y3')) - st.header('Spectrogram '+str(texto2)) - st.plotly_chart(fig3) - - #sim = similarity('texto1.wav', 'texto2.wav') - #print(f'Similarity: {sim:.2f}%') - #st.header('Similarity: '+str(sim)) - - N = len(data1[inicio1:]) - T = 1.0 / fs1 - yf = np.fft.fft(data1[inicio1:]) - xf = np.linspace(0.0, 1.0/(2.0*T), N//2) - fig4 = go.Figure() - fig4.add_trace(go.Scatter(x=xf, y=2.0/N * np.abs(yf[0:N//2]), mode='lines', name='Text 1: '+str(texto1))) - st.header('Fourier '+str(texto1)) - st.plotly_chart(fig4) - - N = len(data2[inicio2:]) - T = 1.0 / fs2 - yf = np.fft.fft(data2[inicio2:]) - xf = np.linspace(0.0, 1.0/(2.0*T), N//2) - fig5 = go.Figure() - fig5.add_trace(go.Scatter(x=xf, y=2.0/N * np.abs(yf[0:N//2]), mode='lines', name='Text 2: '+str(texto2))) - st.header('Fourier '+str(texto2)) - st.plotly_chart(fig5) - - - N = len(data1[inicio1:]) - T = 1.0 / fs1 - yf = np.fft.fft(data1[inicio1:]) - xf = np.linspace(0.0, 1.0/(2.0*T), N//2) - fig6 = go.Figure() - fig6.add_trace(go.Scatter(x=xf, y=2.0/N * np.abs(yf[0:N//2]), mode='lines', name='Text 1: '+str(texto1))) - - N = len(data2[inicio2:]) - T = 1.0 / fs2 - yf = np.fft.fft(data2[inicio2:]) - xf = np.linspace(0.0, 1.0/(2.0*T), N//2) - fig6.add_trace(go.Scatter(x=xf, y=2.0/N * np.abs(yf[0:N//2]), mode='lines', name='Text 2: '+str(texto2))) - - st.header('Fourier Mix') - st.plotly_chart(fig6) - - diff --git a/spaces/heiyuan/ChatGPT/chat_func.py b/spaces/heiyuan/ChatGPT/chat_func.py deleted file mode 100644 index 676259bd4d394240cf0f41f0bcdcb480121c9c98..0000000000000000000000000000000000000000 --- a/spaces/heiyuan/ChatGPT/chat_func.py +++ /dev/null @@ -1,456 +0,0 @@ -# -*- coding:utf-8 -*- -from __future__ import annotations -from typing import TYPE_CHECKING, List - -import logging -import json -import os -import requests -import urllib3 - -from tqdm import tqdm -import colorama -from 
duckduckgo_search import ddg -import asyncio -import aiohttp - -from presets import * -from llama_func import * -from utils import * - -# logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s") - -if TYPE_CHECKING: - from typing import TypedDict - - class DataframeData(TypedDict): - headers: List[str] - data: List[List[str | int | bool]] - - -initial_prompt = "You are a helpful assistant." -API_URL = "https://api.openai.com/v1/chat/completions" -HISTORY_DIR = "history" -TEMPLATES_DIR = "templates" - -def get_response( - openai_api_key, system_prompt, history, temperature, top_p, stream, selected_model -): - headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {openai_api_key}", - } - - history = [construct_system(system_prompt), *history] - - payload = { - "model": selected_model, - "messages": history, # [{"role": "user", "content": f"{inputs}"}], - "temperature": temperature, # 1.0, - "top_p": top_p, # 1.0, - "n": 1, - "stream": stream, - "presence_penalty": 0, - "frequency_penalty": 0, - } - if stream: - timeout = timeout_streaming - else: - timeout = timeout_all - - # Read the proxy settings from environment variables - http_proxy = os.environ.get("HTTP_PROXY") or os.environ.get("http_proxy") - https_proxy = os.environ.get("HTTPS_PROXY") or os.environ.get("https_proxy") - - # If proxy settings exist, use them - proxies = {} - if http_proxy: - logging.info(f"Using HTTP proxy: {http_proxy}") - proxies["http"] = http_proxy - if https_proxy: - logging.info(f"Using HTTPS proxy: {https_proxy}") - proxies["https"] = https_proxy - - # If a proxy is configured, send the request through it; otherwise use the default settings - if proxies: - response = requests.post( - API_URL, - headers=headers, - json=payload, - stream=True, - timeout=timeout, - proxies=proxies, - ) - else: - response = requests.post( - API_URL, - headers=headers, - json=payload, - stream=True, - timeout=timeout, - ) - return response - - -def stream_predict( - openai_api_key, - system_prompt, - history, - inputs, - chatbot, - all_token_counts, - top_p, - temperature, - selected_model, - fake_input=None, - display_append="" -): - def get_return_value(): - return chatbot, history, status_text, all_token_counts - - logging.info("Streaming answer mode") - partial_words = "" - counter = 0 - status_text = "Starting to stream the answer..." - history.append(construct_user(inputs)) - history.append(construct_assistant("")) - if fake_input: - chatbot.append((fake_input, "")) - else: - chatbot.append((inputs, "")) - user_token_count = 0 - if len(all_token_counts) == 0: - system_prompt_token_count = count_token(construct_system(system_prompt)) - user_token_count = ( - count_token(construct_user(inputs)) + system_prompt_token_count - ) - else: - user_token_count = count_token(construct_user(inputs)) - all_token_counts.append(user_token_count) - logging.info(f"Input token count: {user_token_count}") - yield get_return_value() - try: - response = get_response( - openai_api_key, - system_prompt, - history, - temperature, - top_p, - True, - selected_model, - ) - except requests.exceptions.ConnectTimeout: - status_text = ( - standard_error_msg + connection_timeout_prompt + error_retrieve_prompt - ) - yield get_return_value() - return - except requests.exceptions.ReadTimeout: - status_text = standard_error_msg + read_timeout_prompt + error_retrieve_prompt - yield get_return_value() - return - - yield get_return_value() - error_json_str = "" - - for chunk in response.iter_lines(): - if counter == 0: - counter += 1 - continue - counter += 1 - # check whether each line is non-empty - if chunk: - chunk = chunk.decode() - 
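For context on the `chunk[6:]` slice in the parsing step below: each streamed line from the Chat Completions API is a server-sent-events record prefixed with `data: `, so the first six characters are stripped before JSON decoding. A minimal, illustrative sketch (the payload shown is a typical shape, not captured output):

```python
import json

# One raw SSE line as returned by response.iter_lines() (illustrative shape only)
raw_line = b'data: {"choices": [{"delta": {"content": "Hi"}, "finish_reason": null}]}'

decoded = raw_line.decode()
payload = json.loads(decoded[6:])  # drop the leading 'data: ' prefix (6 characters)
print(payload["choices"][0]["delta"].get("content", ""))  # -> "Hi"
```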
chunklength = len(chunk) - try: - chunk = json.loads(chunk[6:]) - except json.JSONDecodeError: - logging.info(chunk) - error_json_str += chunk - status_text = f"JSON parsing error. Please reset the conversation. Received content: {error_json_str}" - yield get_return_value() - continue - # decode each line as response data is in bytes - if chunklength > 6 and "delta" in chunk["choices"][0]: - finish_reason = chunk["choices"][0]["finish_reason"] - status_text = construct_token_message( - sum(all_token_counts), stream=True - ) - if finish_reason == "stop": - yield get_return_value() - break - try: - partial_words = ( - partial_words + chunk["choices"][0]["delta"]["content"] - ) - except KeyError: - status_text = ( - standard_error_msg - + "No content found in the API reply. The token count most likely reached its limit. Please reset the conversation. Current token count: " - + str(sum(all_token_counts)) - ) - yield get_return_value() - break - history[-1] = construct_assistant(partial_words) - chatbot[-1] = (chatbot[-1][0], partial_words+display_append) - all_token_counts[-1] += 1 - yield get_return_value() - - -def predict_all( - openai_api_key, - system_prompt, - history, - inputs, - chatbot, - all_token_counts, - top_p, - temperature, - selected_model, - fake_input=None, - display_append="" -): - logging.info("Single-shot answer mode") - history.append(construct_user(inputs)) - history.append(construct_assistant("")) - if fake_input: - chatbot.append((fake_input, "")) - else: - chatbot.append((inputs, "")) - all_token_counts.append(count_token(construct_user(inputs))) - try: - response = get_response( - openai_api_key, - system_prompt, - history, - temperature, - top_p, - False, - selected_model, - ) - except requests.exceptions.ConnectTimeout: - status_text = ( - standard_error_msg + connection_timeout_prompt + error_retrieve_prompt - ) - return chatbot, history, status_text, all_token_counts - except requests.exceptions.ProxyError: - status_text = standard_error_msg + proxy_error_prompt + error_retrieve_prompt - return chatbot, history, status_text, all_token_counts - except requests.exceptions.SSLError: - status_text = standard_error_msg + ssl_error_prompt + error_retrieve_prompt - return chatbot, history, status_text, all_token_counts - response = json.loads(response.text) - content = response["choices"][0]["message"]["content"] - history[-1] = construct_assistant(content) - chatbot[-1] = (chatbot[-1][0], content+display_append) - total_token_count = response["usage"]["total_tokens"] - all_token_counts[-1] = total_token_count - sum(all_token_counts) - status_text = construct_token_message(total_token_count) - return chatbot, history, status_text, all_token_counts - - -def predict( - openai_api_key, - system_prompt, - history, - inputs, - chatbot, - all_token_counts, - top_p, - temperature, - stream=False, - selected_model=MODELS[0], - use_websearch=False, - files = None, - should_check_token_count=True, -): # repetition_penalty, top_k - logging.info("Input: " + colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL) - if files: - msg = "Building the index... (this may take quite a while)" - logging.info(msg) - yield chatbot, history, msg, all_token_counts - index = construct_index(openai_api_key, file_src=files) - msg = "Index built. Fetching the answer..." - yield chatbot, history, msg, all_token_counts - history, chatbot, status_text = chat_ai(openai_api_key, index, inputs, history, chatbot) - yield chatbot, history, status_text, all_token_counts - return - - old_inputs = "" - link_references = [] - if use_websearch: - search_results = ddg(inputs, max_results=5) - old_inputs = inputs - web_results = [] - for idx, result in enumerate(search_results): - logging.info(f"Search result {idx + 
1}:{result}") - domain_name = urllib3.util.parse_url(result["href"]).host - web_results.append(f'[{idx+1}]"{result["body"]}"\nURL: {result["href"]}') - link_references.append(f"{idx+1}. [{domain_name}]({result['href']})\n") - link_references = "\n\n" + "".join(link_references) - inputs = ( - replace_today(WEBSEARCH_PTOMPT_TEMPLATE) - .replace("{query}", inputs) - .replace("{web_results}", "\n\n".join(web_results)) - ) - else: - link_references = "" - - if len(openai_api_key) != 51: - status_text = standard_error_msg + no_apikey_msg - logging.info(status_text) - chatbot.append((inputs, "")) - if len(history) == 0: - history.append(construct_user(inputs)) - history.append("") - all_token_counts.append(0) - else: - history[-2] = construct_user(inputs) - yield chatbot, history, status_text, all_token_counts - return - - yield chatbot, history, "开始生成回答……", all_token_counts - - if stream: - logging.info("使用流式传输") - iter = stream_predict( - openai_api_key, - system_prompt, - history, - inputs, - chatbot, - all_token_counts, - top_p, - temperature, - selected_model, - fake_input=old_inputs, - display_append=link_references - ) - for chatbot, history, status_text, all_token_counts in iter: - yield chatbot, history, status_text, all_token_counts - else: - logging.info("不使用流式传输") - chatbot, history, status_text, all_token_counts = predict_all( - openai_api_key, - system_prompt, - history, - inputs, - chatbot, - all_token_counts, - top_p, - temperature, - selected_model, - fake_input=old_inputs, - display_append=link_references - ) - yield chatbot, history, status_text, all_token_counts - - logging.info(f"传输完毕。当前token计数为{all_token_counts}") - if len(history) > 1 and history[-1]["content"] != inputs: - logging.info( - "回答为:" - + colorama.Fore.BLUE - + f"{history[-1]['content']}" - + colorama.Style.RESET_ALL - ) - - if stream: - max_token = max_token_streaming - else: - max_token = max_token_all - - if sum(all_token_counts) > max_token and should_check_token_count: - status_text = f"精简token中{all_token_counts}/{max_token}" - logging.info(status_text) - yield chatbot, history, status_text, all_token_counts - iter = reduce_token_size( - openai_api_key, - system_prompt, - history, - chatbot, - all_token_counts, - top_p, - temperature, - max_token//2, - selected_model=selected_model, - ) - for chatbot, history, status_text, all_token_counts in iter: - status_text = f"Token 达到上限,已自动降低Token计数至 {status_text}" - yield chatbot, history, status_text, all_token_counts - - -def retry( - openai_api_key, - system_prompt, - history, - chatbot, - token_count, - top_p, - temperature, - stream=False, - selected_model=MODELS[0], -): - logging.info("重试中……") - if len(history) == 0: - yield chatbot, history, f"{standard_error_msg}上下文是空的", token_count - return - history.pop() - inputs = history.pop()["content"] - token_count.pop() - iter = predict( - openai_api_key, - system_prompt, - history, - inputs, - chatbot, - token_count, - top_p, - temperature, - stream=stream, - selected_model=selected_model, - ) - logging.info("重试中……") - for x in iter: - yield x - logging.info("重试完毕") - - -def reduce_token_size( - openai_api_key, - system_prompt, - history, - chatbot, - token_count, - top_p, - temperature, - max_token_count, - selected_model=MODELS[0], -): - logging.info("开始减少token数量……") - iter = predict( - openai_api_key, - system_prompt, - history, - summarize_prompt, - chatbot, - token_count, - top_p, - temperature, - selected_model=selected_model, - should_check_token_count=False, - ) - logging.info(f"chatbot: {chatbot}") - flag 
= False - for chatbot, history, status_text, previous_token_count in iter: - num_chat = find_n(previous_token_count, max_token_count) - if flag: - chatbot = chatbot[:-1] - flag = True - history = history[-2*num_chat:] if num_chat > 0 else [] - token_count = previous_token_count[-num_chat:] if num_chat > 0 else [] - msg = f"Kept the most recent {num_chat} rounds of conversation" - yield chatbot, history, msg + ", " + construct_token_message( - sum(token_count) if len(token_count) > 0 else 0, - ), token_count - logging.info(msg) - logging.info("Finished reducing the token count") \ No newline at end of file diff --git a/spaces/heiyubili/bingo/src/pages/api/proxy.ts b/spaces/heiyubili/bingo/src/pages/api/proxy.ts deleted file mode 100644 index 240b5fb5561d993c6381649bf4544ce12f3cdab2..0000000000000000000000000000000000000000 --- a/spaces/heiyubili/bingo/src/pages/api/proxy.ts +++ /dev/null @@ -1,24 +0,0 @@ -'use server' - -import { NextApiRequest, NextApiResponse } from 'next' -import { fetch } from '@/lib/isomorphic' - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - try { - const { url, headers, method = 'GET', body } = req.body - if (!url) { - return res.end('ok') - } - const response = await fetch(url, { headers, method, body, redirect: 'manual' }) - const text = await response.text() - res.writeHead(200, { - 'Content-Type': 'application/text', - 'x-url': response.url, - 'x-status': response.status, - }) - res.end(text) - } catch (e) { - console.log(e) - return res.end(e) - } -} diff --git a/spaces/hkanumilli/DigitClassifier/neural_network.py b/spaces/hkanumilli/DigitClassifier/neural_network.py deleted file mode 100644 index 02a412b67042067fd8e4508b77a952a9e84bceee..0000000000000000000000000000000000000000 --- a/spaces/hkanumilli/DigitClassifier/neural_network.py +++ /dev/null @@ -1,42 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - -# class MNISTNetwork(nn.Module): -# # achieved 97 percent accuracy -# def __init__(self): -# super().__init__() -# self.layer1 = nn.Linear(784, 400) -# self.layer2 = nn.Linear(400, 256) -# self.layer3 = nn.Linear(256, 64) -# self.layer4 = nn.Linear(64, 32) -# self.layer5 = nn.Linear(32, 10) - -# def forward(self, x): -# x = x.view(-1, 28*28) -# x = torch.relu(self.layer1(x)) -# x = torch.relu(self.layer2(x)) -# x = torch.relu(self.layer3(x)) -# x = torch.relu(self.layer4(x)) -# x = torch.relu(self.layer5(x)) -# return F.log_softmax(x, dim=1) - -class MNISTNetwork(nn.Module): - # achieved 98.783 percent accuracy - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 32, kernel_size=3, padding=1) - self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1) - self.fc1 = nn.Linear(64*7*7, 128) - self.fc2 = nn.Linear(128, 10) - - def forward(self, x): - x = F.relu(self.conv1(x)) - x = F.max_pool2d(x, 2) - x = F.relu(self.conv2(x)) - x = F.max_pool2d(x, 2) - x = x.view(-1, 64*7*7) - x = F.relu(self.fc1(x)) - x = self.fc2(x) - return F.log_softmax(x, dim=1) - diff --git a/spaces/hlydecker/RA-document-QAchat/streamlit_langchain_chat/customized_langchain/vectorstores/__init__.py b/spaces/hlydecker/RA-document-QAchat/streamlit_langchain_chat/customized_langchain/vectorstores/__init__.py deleted file mode 100644 index c16bacdecae88056f652d51983d1248af1fbdc3a..0000000000000000000000000000000000000000 --- a/spaces/hlydecker/RA-document-QAchat/streamlit_langchain_chat/customized_langchain/vectorstores/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -"""Wrappers on top of vector stores.""" -from 
streamlit_langchain_chat.customized_langchain.vectorstores.faiss import FAISS -from streamlit_langchain_chat.customized_langchain.vectorstores.pinecone import Pinecone - -__all__ = [ - "FAISS", - "Pinecone", -] diff --git a/spaces/hstrejoluna/dreambooth-training/app.py b/spaces/hstrejoluna/dreambooth-training/app.py deleted file mode 100644 index 7a438a3bfa4eaeda25f62aefd0cad77d494ed71d..0000000000000000000000000000000000000000 --- a/spaces/hstrejoluna/dreambooth-training/app.py +++ /dev/null @@ -1,659 +0,0 @@ -from subprocess import getoutput -import os - -gpu_info = getoutput('nvidia-smi') -if("A10G" in gpu_info): - which_gpu = "A10G" - os.system(f"pip install -q https://github.com/camenduru/stable-diffusion-webui-colab/releases/download/0.0.15/xformers-0.0.15.dev0+4c06c79.d20221205-cp38-cp38-linux_x86_64.whl") -elif("T4" in gpu_info): - which_gpu = "T4" - os.system(f"pip install -q https://github.com/camenduru/stable-diffusion-webui-colab/releases/download/0.0.15/xformers-0.0.15.dev0+1515f77.d20221130-cp38-cp38-linux_x86_64.whl") -else: - which_gpu = "CPU" - -import gradio as gr -from pathlib import Path -import argparse -import shutil -from train_dreambooth import run_training -from convertosd import convert -from PIL import Image -from slugify import slugify -import requests -import torch -import zipfile -import tarfile -import urllib.parse -import gc -from diffusers import StableDiffusionPipeline -from huggingface_hub import snapshot_download, update_repo_visibility, HfApi - -is_spaces = True if "SPACE_ID" in os.environ else False -if(is_spaces): - is_shared_ui = True if "multimodalart/dreambooth-training" in os.environ['SPACE_ID'] else False -else: - is_shared_ui = False -is_gpu_associated = torch.cuda.is_available() - -css = ''' - .instruction{position: absolute; top: 0;right: 0;margin-top: 0px !important} - .arrow{position: absolute;top: 0;right: -110px;margin-top: -8px !important} - #component-4, #component-3, #component-10{min-height: 0} - .duplicate-button img{margin: 0} -''' -maximum_concepts = 3 - -#Pre download the files -if(is_gpu_associated): - model_v1 = snapshot_download(repo_id="multimodalart/sd-fine-tunable") - model_v2 = snapshot_download(repo_id="stabilityai/stable-diffusion-2-1", ignore_patterns=["*.ckpt", "*.safetensors"]) - model_v2_512 = snapshot_download(repo_id="stabilityai/stable-diffusion-2-1-base", ignore_patterns=["*.ckpt", "*.safetensors"]) - safety_checker = snapshot_download(repo_id="multimodalart/sd-sc") - model_to_load = model_v1 - -#with zipfile.ZipFile("mix.zip", 'r') as zip_ref: -# zip_ref.extractall(".") - -def swap_text(option, base): - resize_width = 768 if base == "v2-1-768" else 512 - mandatory_liability = "You must have the right to do so and you are liable for the images you use, example:" - if(option == "object"): - instance_prompt_example = "cttoy" - freeze_for = 30 - return [f"You are going to train `object`(s), upload 5-10 images of each object you are planning on training on from different angles/perspectives. You can use services like <a style='text-decoration: underline' target='_blank' href='https://www.birme.net/?target_width={resize_width}&target_height={resize_width}'>birme</a> for smart cropping. {mandatory_liability}:", '''<img src="file/cat-toy.png" />''', f"You should name your concept with a unique made up word that has low chance of the model already knowing it (e.g.: `{instance_prompt_example}` here). 
Images will be automatically cropped to {resize_width}x{resize_width}.", freeze_for, gr.update(visible=False)] - elif(option == "person"): - instance_prompt_example = "julcto" - freeze_for = 70 - #show_prior_preservation = True if base != "v2-1-768" else False - show_prior_preservation=False - if(show_prior_preservation): - prior_preservation_box_update = gr.update(visible=show_prior_preservation) - else: - prior_preservation_box_update = gr.update(visible=show_prior_preservation, value=False) - return [f"You are going to train a `person`(s), upload 10-20 images of each person you are planning on training on from different angles/perspectives. You can use services like <a style='text-decoration: underline' target='_blank' href='https://www.birme.net/?target_width={resize_width}&target_height={resize_width}'>birme</a> for smart cropping. {mandatory_liability}:", '''<img src="file/person.png" />''', f"You should name your concept with a unique made up word that has low chance of the model already knowing it (e.g.: `{instance_prompt_example}` here). Images will be automatically cropped to {resize_width}x{resize_width}.", freeze_for, prior_preservation_box_update] - elif(option == "style"): - instance_prompt_example = "trsldamrl" - freeze_for = 10 - return [f"You are going to train a `style`, upload 10-20 images of the style you are planning on training on. You can use services like <a style='text-decoration: underline' target='_blank' href='https://www.birme.net/?target_width={resize_width}&target_height={resize_width}'>birme</a> for smart cropping. {mandatory_liability}:", '''<img src="file/trsl_style.png" />''', f"You should name your concept with a unique made up word that has low chance of the model already knowing it (e.g.: `{instance_prompt_example}` here). Images will be automatically cropped to {resize_width}x{resize_width}", freeze_for, gr.update(visible=False)] - -def swap_base_model(selected_model): - if(is_gpu_associated): - global model_to_load - if(selected_model == "v1-5"): - model_to_load = model_v1 - elif(selected_model == "v2-1-768"): - model_to_load = model_v2 - else: - model_to_load = model_v2_512 - -def count_files(*inputs): - file_counter = 0 - concept_counter = 0 - for i, input in enumerate(inputs): - if(i < maximum_concepts-1): - files = inputs[i] - if(files): - concept_counter+=1 - file_counter+=len(files) - uses_custom = inputs[-1] - type_of_thing = inputs[-4] - selected_model = inputs[-5] - experimental_faces = inputs[-6] - if(uses_custom): - Training_Steps = int(inputs[-3]) - else: - Training_Steps = file_counter*150 - if(type_of_thing == "person" and Training_Steps > 2400): - Training_Steps = 2400 #Avoid overfitting on person faces - if(is_spaces): - if(selected_model == "v1-5"): - its = 1.1 if which_gpu == "T4" else 1.8 - if(experimental_faces): - its = 1 - elif(selected_model == "v2-1-512"): - its = 0.8 if which_gpu == "T4" else 1.5 - if(experimental_faces): - its = 0.7 - elif(selected_model == "v2-1-768"): - its = 0.48 if which_gpu == "T4" else 0.85 - - gpu_price = 0.60 if which_gpu == "T4" else 1.10 - summary_sentence = f'''You are going to train {concept_counter} {type_of_thing}(s), with {file_counter} images for {Training_Steps} steps. The training should take around {round(Training_Steps/its, 2)} seconds, or {round((Training_Steps/its)/60, 2)} minutes. 
- Setting up, compressing, and uploading the model can take up to 20 minutes.<br>As the {which_gpu}-Small GPU costs US${gpu_price} for 1h, <span style="font-size: 120%"><b>the estimated cost for this training is below US${round((((Training_Steps/its)/3600)+0.3+0.1)*gpu_price, 2)}.</b></span><br><br> - If you check the box below, the GPU attribution will automatically be removed after training is done and the model is uploaded. If not, don't forget to come back here and swap the hardware back to CPU.<br><br>''' - else: - summary_sentence = f'''You are going to train {concept_counter} {type_of_thing}(s), with {file_counter} images for {Training_Steps} steps.<br><br>''' - - return([gr.update(visible=True), gr.update(visible=True, value=summary_sentence)]) - -def update_steps(*files_list): - file_counter = 0 - for i, files in enumerate(files_list): - if(files): - file_counter+=len(files) - return(gr.update(value=file_counter*200)) - -def pad_image(image): - w, h = image.size - if w == h: - return image - elif w > h: - new_image = Image.new(image.mode, (w, w), (0, 0, 0)) - new_image.paste(image, (0, (w - h) // 2)) - return new_image - else: - new_image = Image.new(image.mode, (h, h), (0, 0, 0)) - new_image.paste(image, ((h - w) // 2, 0)) - return new_image - -def validate_model_upload(hf_token, model_name): - if(hf_token != ''): - api = HfApi() - try: - _ = api.whoami(hf_token) - except: - raise gr.Error("You have inserted an invalid Hugging Face token") - try: - if(is_spaces): - update_repo_visibility(repo_id=os.environ['SPACE_ID'], private=True, token=hf_token, repo_type="space") - except: - raise gr.Error("Oops, you created a Hugging Face token with read permissions only. You need one with write permissions") - else: - raise gr.Error("Please insert a Hugging Face Token (make sure to create it with write permissions)") - if(model_name == ""): - raise gr.Error("Please fill in your model's name") - -def train(*inputs): - if is_shared_ui: - raise gr.Error("This Space only works in duplicated instances") - if not is_gpu_associated: - raise gr.Error("Please associate a T4 or A10G GPU for this Space") - hf_token = inputs[-5] - model_name = inputs[-7] - if(is_spaces): - remove_attribution_after = inputs[-6] - else: - remove_attribution_after = False - - if(remove_attribution_after): - validate_model_upload(hf_token, model_name) - - torch.cuda.empty_cache() - if 'pipe' in globals(): - global pipe, pipe_is_set - del pipe - pipe_is_set = False - gc.collect() - - if os.path.exists("output_model"): shutil.rmtree('output_model') - if os.path.exists("instance_images"): shutil.rmtree('instance_images') - if os.path.exists("diffusers_model.tar"): os.remove("diffusers_model.tar") - if os.path.exists("model.ckpt"): os.remove("model.ckpt") - if os.path.exists("hastrained.success"): os.remove("hastrained.success") - file_counter = 0 - which_model = inputs[-10] - resolution = 512 if which_model != "v2-1-768" else 768 - for i, input in enumerate(inputs): - if(i < maximum_concepts-1): - if(input): - os.makedirs('instance_images',exist_ok=True) - files = inputs[i+(maximum_concepts*2)] - prompt = inputs[i+maximum_concepts] - if(prompt == "" or prompt == None): - raise gr.Error("You forgot to define your concept prompt") - for j, file_temp in enumerate(files): - file = Image.open(file_temp.name) - image = pad_image(file) - image = image.resize((resolution, resolution)) - extension = file_temp.name.split(".")[1] - image = image.convert('RGB') - image.save(f'instance_images/{prompt}_({j+1}).jpg', format="JPEG", quality = 100)
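As a quick sanity check of the time/cost arithmetic assembled in `count_files` further above (illustrative numbers only; the constants are the ones hard-coded in that function), here is the estimate for 10 images of an "object" on the v1-5 base with a T4:

```python
# Re-computing the summary-sentence estimate for an assumed example:
# 10 images, type "object", base model v1-5, T4 GPU.
files, its, gpu_price = 10, 1.1, 0.60
training_steps = files * 150                          # 1500 steps
est_seconds = training_steps / its                    # ~1363.6 s, i.e. ~22.7 min
est_cost = (est_seconds / 3600 + 0.3 + 0.1) * gpu_price
print(round(est_seconds / 60, 1), round(est_cost, 2))  # -> 22.7 0.47
```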
- file_counter += 1 - - os.makedirs('output_model',exist_ok=True) - uses_custom = inputs[-1] - type_of_thing = inputs[-4] - experimental_face_improvement = inputs[-9] - - if(uses_custom): - Training_Steps = int(inputs[-3]) - Train_text_encoder_for = int(inputs[-2]) - else: - if(type_of_thing == "object"): - Train_text_encoder_for=30 - - elif(type_of_thing == "style"): - Train_text_encoder_for=15 - - elif(type_of_thing == "person"): - Train_text_encoder_for=70 - - Training_Steps = file_counter*150 - if(type_of_thing == "person" and Training_Steps > 2600): - Training_Steps = 2600 #Avoid overfitting on people's faces - stptxt = int((Training_Steps*Train_text_encoder_for)/100) - gradient_checkpointing = True if (experimental_face_improvement or which_model != "v1-5") else False - cache_latents = True if which_model != "v1-5" else False - if (type_of_thing == "object" or type_of_thing == "style" or (type_of_thing == "person" and not experimental_face_improvement)): - args_general = argparse.Namespace( - image_captions_filename = True, - train_text_encoder = True if stptxt > 0 else False, - stop_text_encoder_training = stptxt, - save_n_steps = 0, - pretrained_model_name_or_path = model_to_load, - instance_data_dir="instance_images", - class_data_dir=None, - output_dir="output_model", - instance_prompt="", - seed=42, - resolution=resolution, - mixed_precision="fp16", - train_batch_size=1, - gradient_accumulation_steps=1, - use_8bit_adam=True, - learning_rate=2e-6, - lr_scheduler="polynomial", - lr_warmup_steps = 0, - max_train_steps=Training_Steps, - gradient_checkpointing=gradient_checkpointing, - cache_latents=cache_latents, - ) - print("Starting single training...") - lock_file = open("intraining.lock", "w") - lock_file.close() - run_training(args_general) - else: - args_general = argparse.Namespace( - image_captions_filename = True, - train_text_encoder = True if stptxt > 0 else False, - stop_text_encoder_training = stptxt, - save_n_steps = 0, - pretrained_model_name_or_path = model_to_load, - instance_data_dir="instance_images", - class_data_dir="Mix", - output_dir="output_model", - with_prior_preservation=True, - prior_loss_weight=1.0, - instance_prompt="", - seed=42, - resolution=resolution, - mixed_precision="fp16", - train_batch_size=1, - gradient_accumulation_steps=1, - use_8bit_adam=True, - learning_rate=2e-6, - lr_scheduler="polynomial", - lr_warmup_steps = 0, - max_train_steps=Training_Steps, - num_class_images=200, - gradient_checkpointing=gradient_checkpointing, - cache_latents=cache_latents, - ) - print("Starting multi-training...") - lock_file = open("intraining.lock", "w") - lock_file.close() - run_training(args_general) - gc.collect() - torch.cuda.empty_cache() - if(which_model == "v1-5"): - print("Adding Safety Checker to the model...") - shutil.copytree(f"{safety_checker}/feature_extractor", "output_model/feature_extractor", dirs_exist_ok=True) - shutil.copytree(f"{safety_checker}/safety_checker", "output_model/safety_checker", dirs_exist_ok=True) - shutil.copy(f"model_index.json", "output_model/model_index.json") - - if(not remove_attribution_after): - print("Archiving model file...") - with tarfile.open("diffusers_model.tar", "w") as tar: - tar.add("output_model", arcname=os.path.basename("output_model")) - if os.path.exists("intraining.lock"): os.remove("intraining.lock") - trained_file = open("hastrained.success", "w") - trained_file.close() - print("Training completed!") - return [ - gr.update(visible=True, value=["diffusers_model.tar"]), #result - 
gr.update(visible=True), #try_your_model - gr.update(visible=True), #push_to_hub - gr.update(visible=True), #convert_button - gr.update(visible=False), #training_ongoing - gr.update(visible=True) #completed_training - ] - else: - where_to_upload = inputs[-8] - push(model_name, where_to_upload, hf_token, which_model, True) - hardware_url = f"https://huggingface.co/spaces/{os.environ['SPACE_ID']}/hardware" - headers = { "authorization" : f"Bearer {hf_token}"} - body = {'flavor': 'cpu-basic'} - requests.post(hardware_url, json = body, headers=headers) - -pipe_is_set = False -def generate(prompt, steps): - torch.cuda.empty_cache() - from diffusers import StableDiffusionPipeline - global pipe_is_set - if(not pipe_is_set): - global pipe - pipe = StableDiffusionPipeline.from_pretrained("./output_model", torch_dtype=torch.float16) - pipe = pipe.to("cuda") - pipe_is_set = True - - image = pipe(prompt, num_inference_steps=steps).images[0] - return(image) - -def push(model_name, where_to_upload, hf_token, which_model, comes_from_automated=False): - validate_model_upload(hf_token, model_name) - if(not os.path.exists("model.ckpt")): - convert("output_model", "model.ckpt") - from huggingface_hub import HfApi, HfFolder, CommitOperationAdd - from huggingface_hub import create_repo - model_name_slug = slugify(model_name) - api = HfApi() - your_username = api.whoami(token=hf_token)["name"] - if(where_to_upload == "My personal profile"): - model_id = f"{your_username}/{model_name_slug}" - else: - model_id = f"sd-dreambooth-library/{model_name_slug}" - headers = {"Authorization" : f"Bearer: {hf_token}", "Content-Type": "application/json"} - response = requests.post("https://huggingface.co/organizations/sd-dreambooth-library/share/SSeOwppVCscfTEzFGQaqpfcjukVeNrKNHX", headers=headers) - - print(f"Starting to upload the model {model_id}...") - images_upload = os.listdir("instance_images") - image_string = "" - instance_prompt_list = [] - previous_instance_prompt = '' - for i, image in enumerate(images_upload): - instance_prompt = image.split("_")[0] - if(instance_prompt != previous_instance_prompt): - title_instance_prompt_string = instance_prompt - instance_prompt_list.append(instance_prompt) - else: - title_instance_prompt_string = '' - previous_instance_prompt = instance_prompt - image_string = f'''{title_instance_prompt_string} {"(use that on your prompt)" if title_instance_prompt_string != "" else ""} -{image_string}![{instance_prompt} {i}](https://huggingface.co/{model_id}/resolve/main/concept_images/{urllib.parse.quote(image)})''' - readme_text = f'''--- -license: creativeml-openrail-m -tags: -- text-to-image -widget: -- text: {instance_prompt_list[0]} ---- -### {model_name} Dreambooth model trained by {api.whoami(token=hf_token)["name"]} with [Hugging Face Dreambooth Training Space](https://huggingface.co/spaces/multimodalart/dreambooth-training) with the {which_model} base model - -You run your new concept via `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb). Don't forget to use the concept prompts! 
- -Sample pictures of: -{image_string} -''' - #Save the readme to a file - readme_file = open("model.README.md", "w") - readme_file.write(readme_text) - readme_file.close() - #Save the token identifier to a file - text_file = open("token_identifier.txt", "w") - text_file.write(', '.join(instance_prompt_list)) - text_file.close() - try: - create_repo(model_id,private=True, token=hf_token) - except: - import time - epoch_time = str(int(time.time())) - create_repo(f"{model_id}-{epoch_time}", private=True,token=hf_token) - operations = [ - CommitOperationAdd(path_in_repo="token_identifier.txt", path_or_fileobj="token_identifier.txt"), - CommitOperationAdd(path_in_repo="README.md", path_or_fileobj="model.README.md"), - CommitOperationAdd(path_in_repo=f"model.ckpt",path_or_fileobj="model.ckpt") - ] - api.create_commit( - repo_id=model_id, - operations=operations, - commit_message=f"Upload the model {model_name}", - token=hf_token - ) - api.upload_folder( - folder_path="output_model", - repo_id=model_id, - token=hf_token - ) - api.upload_folder( - folder_path="instance_images", - path_in_repo="concept_images", - repo_id=model_id, - token=hf_token - ) - if is_spaces: - if(not comes_from_automated): - extra_message = "Don't forget to remove the GPU attribution after you play with it." - else: - extra_message = "The GPU has been removed automatically as requested, and you can try the model via the model page" - api.create_discussion(repo_id=os.environ['SPACE_ID'], title=f"Your model {model_name} has finished training on the Dreambooth Training Space!", description=f"Your model has been successfully uploaded to: https://huggingface.co/{model_id}. {extra_message}",repo_type="space", token=hf_token) - print("Model uploaded successfully!") - return [gr.update(visible=True, value=f"Successfully uploaded your model. Access it [here](https://huggingface.co/{model_id})"), gr.update(visible=True, value=["diffusers_model.tar", "model.ckpt"])] - -def convert_to_ckpt(): - if 'pipe' in globals(): - global pipe, pipe_is_set - del pipe - pipe_is_set = False - gc.collect() - convert("output_model", "model.ckpt") - return gr.update(visible=True, value=["diffusers_model.tar", "model.ckpt"]) - -def check_status(top_description): - if os.path.exists("hastrained.success"): - if is_spaces: - update_top_tag = gr.update(value=f''' - <div class="gr-prose" style="max-width: 80%"> - <h2>Your model has finished training ✅</h2> - <p>Yay, congratulations on training your model. Scroll down to play with it, save it (either by downloading it or by pushing it to the Hugging Face Hub). Once you are done and your model is safe, if you don't want to train a new one, go to the <a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}" target="_blank">settings page</a> and downgrade your Space to CPU Basic</p> - </div> - ''') - else: - update_top_tag = gr.update(value=f''' - <div class="gr-prose" style="max-width: 80%"> - <h2>Your model has finished training ✅</h2> - <p>Yay, congratulations on training your model. Scroll down to play with it, save it (either by downloading it or by pushing it to the Hugging Face Hub).</p> - </div> - ''') - show_outputs = True - elif os.path.exists("intraining.lock"): - update_top_tag = gr.update(value=''' - <div class="gr-prose" style="max-width: 80%"> - <h2>Don't worry, your model is still training! ⌛</h2> - <p>You closed the tab while your model was training, but it's all good! It is still training right now. You can click the "Open logs" button above to check the training status. 
Once training is done, reload this tab to interact with your model</p> - </div> - ''') - show_outputs = False - else: - update_top_tag = gr.update(value=top_description) - show_outputs = False - if os.path.exists("diffusers_model.tar"): - update_files_tag = gr.update(visible=show_outputs, value=["diffusers_model.tar"]) - else: - update_files_tag = gr.update(visible=show_outputs) - return [ - update_top_tag, #top_description - gr.update(visible=show_outputs), #try_your_model - gr.update(visible=show_outputs), #push_to_hub - update_files_tag, #result - gr.update(visible=show_outputs), #convert_button - ] - -def checkbox_swap(checkbox): - return [gr.update(visible=checkbox), gr.update(visible=checkbox), gr.update(visible=checkbox), gr.update(visible=checkbox)] - -with gr.Blocks(css=css) as demo: - with gr.Box(): - if is_shared_ui: - top_description = gr.HTML(f''' - <div class="gr-prose" style="max-width: 80%"> - <h2>Attention - This Space doesn't work in this shared UI</h2> - <p>For it to work, you can either run locally or duplicate the Space and run it on your own profile using a (paid) private T4-small or A10G-small GPU for training. A T4 costs US$0.60/h, so it should cost < US$1 to train most models using default settings with it!  <a class="duplicate-button" style="display:inline-block" target="_blank" href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></p> - <img class="instruction" src="file/duplicate.png"> - <img class="arrow" src="file/arrow.png" /> - </div> - ''') - elif(is_spaces): - if(is_gpu_associated): - top_description = gr.HTML(f''' - <div class="gr-prose" style="max-width: 80%"> - <h2>You have successfully associated a {which_gpu} GPU to the Dreambooth Training Space 🎉</h2> - <p>You can now train your model! You will be billed by the minute from when you activated the GPU until when it is turned it off.</p> - </div> - ''') - else: - top_description = gr.HTML(f''' - <div class="gr-prose" style="max-width: 80%"> - <h2>You have successfully duplicated the Dreambooth Training Space 🎉</h2> - <p>There's only one step left before you can train your model: <a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}/settings" style="text-decoration: underline" target="_blank">attribute a <b>T4-small or A10G-small GPU</b> to it (via the Settings tab)</a> and run the training below. You will be billed by the minute from when you activate the GPU until when it is turned it off.</p> - </div> - ''') - else: - top_description = gr.HTML(f''' - <div class="gr-prose" style="max-width: 80%"> - <h2>You have successfully cloned the Dreambooth Training Space locally 🎉</h2> - <p>Do a <code>pip install requirements-local.txt</code></p> - </div> - ''') - gr.Markdown("# Dreambooth Training UI 💭") - gr.Markdown("Customize Stable Diffusion v1 or v2 (ⁿᵉʷ!) by giving it a few examples of a concept. 
Based on the [🧨 diffusers](https://github.com/huggingface/diffusers) implementation, additional techniques from [TheLastBen](https://github.com/TheLastBen/diffusers) and [ShivamShrirao](https://github.com/ShivamShrirao/diffusers)") - - with gr.Row() as what_are_you_training: - type_of_thing = gr.Dropdown(label="What would you like to train?", choices=["object", "person", "style"], value="object", interactive=True) - base_model_to_use = gr.Dropdown(label="Which base model would you like to use?", choices=["v1-5", "v2-1-512", "v2-1-768"], value="v1-5", interactive=True) - - #Very hacky approach to emulate dynamically created Gradio components - with gr.Row() as upload_your_concept: - with gr.Column(): - thing_description = gr.Markdown("You are going to train an `object`, please upload 5-10 images of the object you are planning on training on from different angles/perspectives. You must have the right to do so and you are liable for the images you use, example") - thing_experimental = gr.Checkbox(label="Improve faces (prior preservation) - can take longer training but can improve faces", visible=False, value=False) - thing_image_example = gr.HTML('''<img src="file/cat-toy.png" />''') - things_naming = gr.Markdown("You should name your concept with a unique made up word that has low chance of the model already knowing it (e.g.: `cttoy` here). Images will be automatically cropped to 512x512.") - - with gr.Column(): - file_collection = [] - concept_collection = [] - buttons_collection = [] - delete_collection = [] - is_visible = [] - - row = [None] * maximum_concepts - for x in range(maximum_concepts): - ordinal = lambda n: "%d%s" % (n, "tsnrhtdd"[(n // 10 % 10 != 1) * (n % 10 < 4) * n % 10::4]) - if(x == 0): - visible = True - is_visible.append(gr.State(value=True)) - else: - visible = False - is_visible.append(gr.State(value=False)) - - file_collection.append(gr.File(label=f'''Upload the images for your {ordinal(x+1) if (x>0) else ""} concept''', file_count="multiple", interactive=True, visible=visible)) - with gr.Column(visible=visible) as row[x]: - concept_collection.append(gr.Textbox(label=f'''{ordinal(x+1) if (x>0) else ""} concept prompt - use a unique, made up word to avoid collisions''')) - with gr.Row(): - if(x < maximum_concepts-1): - buttons_collection.append(gr.Button(value="Add +1 concept", visible=visible)) - if(x > 0): - delete_collection.append(gr.Button(value=f"Delete {ordinal(x+1)} concept")) - - counter_add = 1 - for button in buttons_collection: - if(counter_add < len(buttons_collection)): - button.click(lambda: - [gr.update(visible=True),gr.update(visible=True), gr.update(visible=False), gr.update(visible=True), True, None], - None, - [row[counter_add], file_collection[counter_add], buttons_collection[counter_add-1], buttons_collection[counter_add], is_visible[counter_add], file_collection[counter_add]], queue=False) - else: - button.click(lambda:[gr.update(visible=True),gr.update(visible=True), gr.update(visible=False), True], None, [row[counter_add], file_collection[counter_add], buttons_collection[counter_add-1], is_visible[counter_add]], queue=False) - counter_add += 1 - - counter_delete = 1 - for delete_button in delete_collection: - if(counter_delete < len(delete_collection)+1): - delete_button.click(lambda:[gr.update(visible=False),gr.update(visible=False), gr.update(visible=True), False], None, [file_collection[counter_delete], row[counter_delete], buttons_collection[counter_delete-1], is_visible[counter_delete]], queue=False) - counter_delete += 1 - - with 
gr.Accordion("Custom Settings", open=False): - swap_auto_calculated = gr.Checkbox(label="Use custom settings") - gr.Markdown("If not checked, the % of frozen encoder will be tuned automatically to whether you are training an `object`, `person` or `style`. The text-encoder is frozen after 10% of the steps for a style, 30% of the steps for an object and 75% trained for persons. The number of steps varies between 1400 and 2400 depending on how many images uploaded. If you see too many artifacts in your output, it means it may have overfit and you need less steps. If your results aren't really what you wanted, it may be underfitting and you need more steps.") - steps = gr.Number(label="How many steps", value=2400) - perc_txt_encoder = gr.Number(label="Percentage of the training steps the text-encoder should be trained as well", value=30) - - with gr.Box(visible=False) as training_summary: - training_summary_text = gr.HTML("", visible=True, label="Training Summary") - is_advanced_visible = True if is_spaces else False - training_summary_checkbox = gr.Checkbox(label="Automatically remove paid GPU attribution and upload model to the Hugging Face Hub after training", value=True, visible=is_advanced_visible) - training_summary_model_name = gr.Textbox(label="Name of your model", visible=True) - training_summary_where_to_upload = gr.Dropdown(["My personal profile", "Public Library"], value="My personal profile", label="Upload to", visible=True) - training_summary_token_message = gr.Markdown("[A Hugging Face write access token](https://huggingface.co/settings/tokens), go to \"New token\" -> Role : Write. A regular read token won't work here.", visible=True) - training_summary_token = gr.Textbox(label="Hugging Face Write Token", type="password", visible=True) - - train_btn = gr.Button("Start Training") - if(is_shared_ui): - training_ongoing = gr.Markdown("## This Space only works in duplicated instances. Please duplicate it and try again!", visible=False) - elif(not is_gpu_associated): - training_ongoing = gr.Markdown("## Oops, you haven't associated your T4 or A10G GPU to this Space. Visit the Settings tab, associate and try again.", visible=False) - else: - training_ongoing = gr.Markdown("## Training is ongoing ⌛... You can close this tab if you like or just wait. If you did not check the `Remove GPU After training`, you can come back here to try your model and upload it after training. Don't forget to remove the GPU attribution after you are done. ", visible=False) - - #Post-training UI - completed_training = gr.Markdown('''# ✅ Training completed. - ### Don't forget to remove the GPU attribution after you are done trying and uploading your model''', visible=False) - - with gr.Row(): - with gr.Box(visible=False) as try_your_model: - gr.Markdown("## Try your model") - prompt = gr.Textbox(label="Type your prompt") - result_image = gr.Image() - inference_steps = gr.Slider(minimum=1, maximum=150, value=50, step=1) - generate_button = gr.Button("Generate Image") - - with gr.Box(visible=False) as push_to_hub: - gr.Markdown("## Push to Hugging Face Hub") - model_name = gr.Textbox(label="Name of your model", placeholder="Tarsila do Amaral Style") - where_to_upload = gr.Dropdown(["My personal profile", "Public Library"], label="Upload to") - gr.Markdown("[A Hugging Face write access token](https://huggingface.co/settings/tokens), go to \"New token\" -> Role : Write. 
A regular read token won't work here.") - hf_token = gr.Textbox(label="Hugging Face Write Token", type="password") - - push_button = gr.Button("Push to the Hub") - - result = gr.File(label="Download the uploaded models in the diffusers format", visible=True) - success_message_upload = gr.Markdown(visible=False) - convert_button = gr.Button("Convert to CKPT", visible=False) - - #Swap the examples and the % of text encoder trained depending if it is an object, person or style - type_of_thing.change(fn=swap_text, inputs=[type_of_thing, base_model_to_use], outputs=[thing_description, thing_image_example, things_naming, perc_txt_encoder, thing_experimental], queue=False, show_progress=False) - - #Swap the base model - base_model_to_use.change(fn=swap_text, inputs=[type_of_thing, base_model_to_use], outputs=[thing_description, thing_image_example, things_naming, perc_txt_encoder, thing_experimental], queue=False, show_progress=False) - base_model_to_use.change(fn=swap_base_model, inputs=base_model_to_use, outputs=[]) - - #Update the summary box below the UI according to how many images are uploaded and whether users are using custom settings or not - for file in file_collection: - #file.change(fn=update_steps,inputs=file_collection, outputs=steps) - file.change(fn=count_files, inputs=file_collection+[thing_experimental]+[base_model_to_use]+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary, training_summary_text], queue=False) - - thing_experimental.change(fn=count_files, inputs=file_collection+[thing_experimental]+[base_model_to_use]+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary, training_summary_text], queue=False) - base_model_to_use.change(fn=count_files, inputs=file_collection+[thing_experimental]+[base_model_to_use]+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary, training_summary_text], queue=False) - steps.change(fn=count_files, inputs=file_collection+[thing_experimental]+[base_model_to_use]+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary, training_summary_text], queue=False) - perc_txt_encoder.change(fn=count_files, inputs=file_collection+[thing_experimental]+[base_model_to_use]+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary, training_summary_text], queue=False) - - #Give more options if the user wants to finish everything after training - if(is_spaces): - training_summary_checkbox.change(fn=checkbox_swap, inputs=training_summary_checkbox, outputs=[training_summary_token_message, training_summary_token, training_summary_model_name, training_summary_where_to_upload],queue=False, show_progress=False) - #Add a message for while it is in training - train_btn.click(lambda:gr.update(visible=True), inputs=None, outputs=training_ongoing) - - #The main train function - train_btn.click(fn=train, inputs=is_visible+concept_collection+file_collection+[base_model_to_use]+[thing_experimental]+[training_summary_where_to_upload]+[training_summary_model_name]+[training_summary_checkbox]+[training_summary_token]+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[result, try_your_model, push_to_hub, convert_button, training_ongoing, completed_training], queue=False) - - #Button to generate an image from your trained model after training - generate_button.click(fn=generate, inputs=[prompt, inference_steps], outputs=result_image, queue=False) - #Button to push 
the model to the Hugging Face Hub - push_button.click(fn=push, inputs=[model_name, where_to_upload, hf_token, base_model_to_use], outputs=[success_message_upload, result], queue=False) - #Button to convert the model to ckpt format - convert_button.click(fn=convert_to_ckpt, inputs=[], outputs=result, queue=False) - - #Checks if the training is running - demo.load(fn=check_status, inputs=top_description, outputs=[top_description, try_your_model, push_to_hub, result, convert_button], queue=False, show_progress=False) - -demo.queue(default_enabled=False).launch(debug=True) \ No newline at end of file diff --git a/spaces/hugginglearners/image-style-transfer/utils.py b/spaces/hugginglearners/image-style-transfer/utils.py deleted file mode 100644 index a6171f7990b3946a68c41b80679845984c34c071..0000000000000000000000000000000000000000 --- a/spaces/hugginglearners/image-style-transfer/utils.py +++ /dev/null @@ -1,141 +0,0 @@ -import math -import numpy as np -import pandas as pd - -import gradio as gr -from huggingface_hub import from_pretrained_fastai -from fastai.vision.all import * -from torchvision.models import vgg19, vgg16 - -pascal_source = '.' -EXAMPLES_PATH = Path('/content/examples') -repo_id = "hugginglearners/fastai-style-transfer" - -def get_stl_fs(fs): return fs[:-1] - -def style_loss(inp:Tensor, out_feat:Tensor): - "Calculate style loss, assumes we have `im_grams`" - # Get batch size - bs = inp[0].shape[0] - loss = [] - # For every item in our inputs - for y, f in zip(*map(get_stl_fs, [im_grams, inp])): - # Calculate MSE - loss.append(F.mse_loss(y.repeat(bs, 1, 1), gram(f))) - # Multiply their sum by 3e5 (300,000) - return 3e5 * sum(loss) - -class FeatureLoss(Module): - "Combines two losses and features into a usable loss function" - def __init__(self, feats, style_loss, act_loss, hooks, feat_net): - store_attr() - self.hooks = hooks - self.feat_net = feat_net - self.reset_metrics() - - def forward(self, pred, targ): - # First get the features of our prediction and target - pred_feat, targ_feat = self.feats(self.feat_net, self.hooks, pred), self.feats(self.feat_net, self.hooks, targ) - # Calculate style and activation loss - style_loss = self.style_loss(pred_feat, targ_feat) - act_loss = self.act_loss(pred_feat, targ_feat) - # Store the loss - self._add_loss(style_loss, act_loss) - # Return the sum - return style_loss + act_loss - - def reset_metrics(self): - # Generates a blank metric - self.metrics = dict(style = [], content = []) - - def _add_loss(self, style_loss, act_loss): - # Add to our metrics - self.metrics['style'].append(style_loss) - self.metrics['content'].append(act_loss) - -def act_loss(inp:Tensor, targ:Tensor): - "Calculate the MSE loss of the activation layers" - return F.mse_loss(inp[-1], targ[-1]) - -class ReflectionLayer(Module): - "A series of Reflection Padding followed by a ConvLayer" - def __init__(self, in_channels, out_channels, ks=3, stride=2): - reflection_padding = ks // 2 - self.reflection_pad = nn.ReflectionPad2d(reflection_padding) - self.conv2d = nn.Conv2d(in_channels, out_channels, ks, stride) - - def forward(self, x): - out = self.reflection_pad(x) - out = self.conv2d(out) - return out - -class ResidualBlock(Module): - "Two reflection layers and an added activation function with residual" - def __init__(self, channels): - self.conv1 = ReflectionLayer(channels, channels, ks=3, stride=1) - self.in1 = nn.InstanceNorm2d(channels, affine=True) - self.conv2 = ReflectionLayer(channels, channels, ks=3, stride=1) - self.in2 = nn.InstanceNorm2d(channels, affine=True) 
- self.relu = nn.ReLU() - - def forward(self, x): - residual = x - out = self.relu(self.in1(self.conv1(x))) - out = self.in2(self.conv2(out)) - out = out + residual - return out - -class UpsampleConvLayer(Module): - "Upsample with a ReflectionLayer" - def __init__(self, in_channels, out_channels, ks=3, stride=1, upsample=None): - self.upsample = upsample - reflection_padding = ks // 2 - self.reflection_pad = nn.ReflectionPad2d(reflection_padding) - self.conv2d = nn.Conv2d(in_channels, out_channels, ks, stride) - - def forward(self, x): - x_in = x - if self.upsample: - x_in = torch.nn.functional.interpolate(x_in, mode='nearest', scale_factor=self.upsample) - out = self.reflection_pad(x_in) - out = self.conv2d(out) - return out - -class TransformerNet(Module): - "A simple network for style transfer" - def __init__(self): - # Initial convolution layers - self.conv1 = ReflectionLayer(3, 32, ks=9, stride=1) - self.in1 = nn.InstanceNorm2d(32, affine=True) - self.conv2 = ReflectionLayer(32, 64, ks=3, stride=2) - self.in2 = nn.InstanceNorm2d(64, affine=True) - self.conv3 = ReflectionLayer(64, 128, ks=3, stride=2) - self.in3 = nn.InstanceNorm2d(128, affine=True) - # Residual layers - self.res1 = ResidualBlock(128) - self.res2 = ResidualBlock(128) - self.res3 = ResidualBlock(128) - self.res4 = ResidualBlock(128) - self.res5 = ResidualBlock(128) - # Upsampling Layers - self.deconv1 = UpsampleConvLayer(128, 64, ks=3, stride=1, upsample=2) - self.in4 = nn.InstanceNorm2d(64, affine=True) - self.deconv2 = UpsampleConvLayer(64, 32, ks=3, stride=1, upsample=2) - self.in5 = nn.InstanceNorm2d(32, affine=True) - self.deconv3 = ReflectionLayer(32, 3, ks=9, stride=1) - # Non-linearities - self.relu = nn.ReLU() - - def forward(self, X): - y = self.relu(self.in1(self.conv1(X))) - y = self.relu(self.in2(self.conv2(y))) - y = self.relu(self.in3(self.conv3(y))) - y = self.res1(y) - y = self.res2(y) - y = self.res3(y) - y = self.res4(y) - y = self.res5(y) - y = self.relu(self.in4(self.deconv1(y))) - y = self.relu(self.in5(self.deconv2(y))) - y = self.deconv3(y) - return y diff --git a/spaces/ifey/chatdemo/gradiodemo/Demo/Login/3.py b/spaces/ifey/chatdemo/gradiodemo/Demo/Login/3.py deleted file mode 100644 index f81fe8b210a8f65f1267272837668ac4ea5b9b95..0000000000000000000000000000000000000000 --- a/spaces/ifey/chatdemo/gradiodemo/Demo/Login/3.py +++ /dev/null @@ -1,17 +0,0 @@ - - -def login_callback(button): - # Handle the login logic - # if button.clicked(): - # The user clicked the login button - # Perform the login here, e.g. validate the username and password entered by the user - # If the login succeeds, perform further actions or update the UI - print("User clicked the login button.") - - -import gradio as gr - -with gr.Blocks() as demo: - gr.LoginButton() - -demo.launch() \ No newline at end of file diff --git a/spaces/inflaton/learn-ai/app_modules/llm_inference.py b/spaces/inflaton/learn-ai/app_modules/llm_inference.py deleted file mode 100644 index 15c055eacecb54c0b019e7bb20350900b266952d..0000000000000000000000000000000000000000 --- a/spaces/inflaton/learn-ai/app_modules/llm_inference.py +++ /dev/null @@ -1,110 +0,0 @@ -import abc -import os -import time -import urllib -from queue import Queue -from threading import Thread -from typing import List, Optional - -from langchain.chains.base import Chain - -from app_modules.llm_loader import LLMLoader, TextIteratorStreamer -from app_modules.utils import remove_extra_spaces - - -class LLMInference(metaclass=abc.ABCMeta): - llm_loader: LLMLoader - chain: Chain - - def __init__(self, llm_loader): - self.llm_loader = llm_loader - self.chain = None - - @abc.abstractmethod - def create_chain(self) -> 
Chain: - pass - - def get_chain(self) -> Chain: - if self.chain is None: - self.chain = self.create_chain() - - return self.chain - - def run_chain(self, chain, inputs, callbacks: Optional[List] = []): - return chain(inputs, callbacks) - - def call_chain( - self, - inputs, - streaming_handler, - q: Queue = None, - testing: bool = False, - ): - print(inputs) - if self.llm_loader.streamer.for_huggingface: - self.llm_loader.lock.acquire() - - try: - self.llm_loader.streamer.reset(q) - - chain = self.get_chain() - result = ( - self._run_chain_with_streaming_handler( - chain, inputs, streaming_handler, testing - ) - if streaming_handler is not None - else self.run_chain(chain, inputs) - ) - - if "answer" in result: - result["answer"] = remove_extra_spaces(result["answer"]) - - base_url = os.environ.get("PDF_FILE_BASE_URL") - if base_url is not None and len(base_url) > 0: - documents = result["source_documents"] - for doc in documents: - source = doc.metadata["source"] - title = source.split("/")[-1] - doc.metadata["url"] = f"{base_url}{urllib.parse.quote(title)}" - - return result - finally: - if self.llm_loader.streamer.for_huggingface: - self.llm_loader.lock.release() - - def _execute_chain(self, chain, inputs, q, sh): - q.put(self.run_chain(chain, inputs, callbacks=[sh])) - - def _run_chain_with_streaming_handler( - self, chain, inputs, streaming_handler, testing - ): - que = Queue() - - t = Thread( - target=self._execute_chain, - args=(chain, inputs, que, streaming_handler), - ) - t.start() - - if self.llm_loader.streamer.for_huggingface: - count = ( - 2 - if "chat_history" in inputs and len(inputs.get("chat_history")) > 0 - else 1 - ) - - while count > 0: - try: - for token in self.llm_loader.streamer: - if not testing: - streaming_handler.on_llm_new_token(token) - - self.llm_loader.streamer.reset() - count -= 1 - except Exception: - if not testing: - print("nothing generated yet - retry in 0.5s") - time.sleep(0.5) - - t.join() - return que.get() diff --git a/spaces/inflaton/learn-ai/unit_test.py b/spaces/inflaton/learn-ai/unit_test.py deleted file mode 100644 index 284bd1b4522674d428f2f4b210735c7a2a8217b4..0000000000000000000000000000000000000000 --- a/spaces/inflaton/learn-ai/unit_test.py +++ /dev/null @@ -1,183 +0,0 @@ -# project/test.py - -import os -import sys -import unittest -from timeit import default_timer as timer - -from langchain.callbacks.base import BaseCallbackHandler -from langchain.schema import HumanMessage - -from app_modules.init import app_init -from app_modules.llm_chat_chain import ChatChain -from app_modules.llm_loader import LLMLoader -from app_modules.utils import get_device_types, print_llm_response - - -class TestLLMLoader(unittest.TestCase): - question = os.environ.get("CHAT_QUESTION") - - def run_test_case(self, llm_model_type, query): - n_threds = int(os.environ.get("NUMBER_OF_CPU_CORES") or "4") - - hf_embeddings_device_type, hf_pipeline_device_type = get_device_types() - print(f"hf_embeddings_device_type: {hf_embeddings_device_type}") - print(f"hf_pipeline_device_type: {hf_pipeline_device_type}") - - llm_loader = LLMLoader(llm_model_type) - start = timer() - llm_loader.init( - n_threds=n_threds, hf_pipeline_device_type=hf_pipeline_device_type - ) - end = timer() - print(f"Model loaded in {end - start:.3f}s") - - result = llm_loader.llm( - [HumanMessage(content=query)] if llm_model_type == "openai" else query - ) - end2 = timer() - print(f"Inference completed in {end2 - end:.3f}s") - print(result) - - def test_openai(self): - self.run_test_case("openai", 
self.question) - - def test_llamacpp(self): - self.run_test_case("llamacpp", self.question) - - def test_gpt4all_j(self): - self.run_test_case("gpt4all-j", self.question) - - def test_huggingface(self): - self.run_test_case("huggingface", self.question) - - def test_hftgi(self): - self.run_test_case("hftgi", self.question) - - -class TestChatChain(unittest.TestCase): - question = os.environ.get("CHAT_QUESTION") - - def run_test_case(self, llm_model_type, query): - n_threds = int(os.environ.get("NUMBER_OF_CPU_CORES") or "4") - - hf_embeddings_device_type, hf_pipeline_device_type = get_device_types() - print(f"hf_embeddings_device_type: {hf_embeddings_device_type}") - print(f"hf_pipeline_device_type: {hf_pipeline_device_type}") - - llm_loader = LLMLoader(llm_model_type) - start = timer() - llm_loader.init( - n_threds=n_threds, hf_pipeline_device_type=hf_pipeline_device_type - ) - chat = ChatChain(llm_loader) - end = timer() - print(f"Model loaded in {end - start:.3f}s") - - inputs = {"question": query} - result = chat.call_chain(inputs, None) - end2 = timer() - print(f"Inference completed in {end2 - end:.3f}s") - print(result) - - inputs = {"question": "how many people?"} - result = chat.call_chain(inputs, None) - end3 = timer() - print(f"Inference completed in {end3 - end2:.3f}s") - print(result) - - def test_openai(self): - self.run_test_case("openai", self.question) - - def test_llamacpp(self): - self.run_test_case("llamacpp", self.question) - - def test_gpt4all_j(self): - self.run_test_case("gpt4all-j", self.question) - - def test_huggingface(self): - self.run_test_case("huggingface", self.question) - - def test_hftgi(self): - self.run_test_case("hftgi", self.question) - - -class TestQAChain(unittest.TestCase): - qa_chain: any - question = os.environ.get("QA_QUESTION") - - def run_test_case(self, llm_model_type, query): - start = timer() - os.environ["LLM_MODEL_TYPE"] = llm_model_type - qa_chain = app_init()[1] - end = timer() - print(f"App initialized in {end - start:.3f}s") - - chat_history = [] - inputs = {"question": query, "chat_history": chat_history} - result = qa_chain.call_chain(inputs, None) - end2 = timer() - print(f"Inference completed in {end2 - end:.3f}s") - print_llm_response(result) - - chat_history.append((query, result["answer"])) - - inputs = {"question": "tell me more", "chat_history": chat_history} - result = qa_chain.call_chain(inputs, None) - end3 = timer() - print(f"Inference completed in {end3 - end2:.3f}s") - print_llm_response(result) - - def test_openai(self): - self.run_test_case("openai", self.question) - - def test_llamacpp(self): - self.run_test_case("llamacpp", self.question) - - def test_gpt4all_j(self): - self.run_test_case("gpt4all-j", self.question) - - def test_huggingface(self): - self.run_test_case("huggingface", self.question) - - def test_hftgi(self): - self.run_test_case("hftgi", self.question) - - -def chat(): - start = timer() - llm_loader = app_init()[0] - end = timer() - print(f"Model loaded in {end - start:.3f}s") - - chat_chain = ChatChain(llm_loader) - chat_history = [] - - chat_start = timer() - - while True: - query = input("Please enter your question: ") - query = query.strip() - if query.lower() == "exit": - break - - print("\nQuestion: " + query) - - start = timer() - result = chat_chain.call_chain( - {"question": query, "chat_history": chat_history}, None - ) - end = timer() - print(f"Completed in {end - start:.3f}s") - - chat_history.append((query, result["response"])) - - chat_end = timer() - print(f"Total time used: {chat_end - 
chat_start:.3f}s") - - -if __name__ == "__main__": - if len(sys.argv) > 1 and sys.argv[1] == "chat": - chat() - else: - unittest.main() diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Dark Skin X64 V20.0.2.477 ? Win.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Dark Skin X64 V20.0.2.477 ? Win.md deleted file mode 100644 index d508cd741155789795edaa586a84b8473c1bcada..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Dark Skin X64 V20.0.2.477 ? Win.md +++ /dev/null @@ -1,38 +0,0 @@ -<h2>Dark Skin x64 v20.0.2.477 – Win</h2><br /><p><b><b>DOWNLOAD</b> ::: <a href="https://urlin.us/2uEwri">https://urlin.us/2uEwri</a></b></p><br /><br /> - -0.2.477 - -When I enable it, the startup log shows the skin has no effect. How do I get the skin to work? - -A: - -Make sure your skin path is correct. - -My name is CM0der. I had the same problem before. - -If the skin path is not the same with the skin folder, the skin don't load. - -Example skin path for skin folder is: C:\Users\YOU\Desktop\YourSkinFolder - -Another way to do this is via skin.properties file. - -For more info, see here: Skin Folder - -Cheers. - -On a typical Wednesday, a dozen or so people sit in a small side room of the university's brain lab in uptown Montreal, scrutinizing video footage. Their task? To determine whether other people possess talents or talents are hiding from view. By that measure, it's hard to know who's really exceptional. And yet the stakes are high. - -This is a screening room for the inimitable NeuroLex project, a social experiment that's in its second year. Started by Jean-François Carrier, an artist and neuroscientist at the Collège de Maille, it's currently funded by the government of Quebec's Fondation Générale des Hommes et des Lois. - -"The experiment is simple, but it has a lot of complexity," says Carrier, who leads the work. - -He assigns subjects the job of making other people's faces in the video clips they've watched. It's a job that requires a lot of intuition: the general rule is, if you think someone looks good, you let it shine. But if you're very polite, you probably won't recognize the individual's talent. - -Jean-François Carrier conducts an experiment at the university's brain lab in Montreal. Photo: Jennifer Sherwood - -Carrier's goal, for those volunteering, is to explore the effect of social pressure on the self. As people face judgment, he wants to know if they can sustain their self-confidence or are in danger of losing it. To that end, he made some changes in the experiment. - -There's a panel of judges — made up of people Carrier has never met before and who aren't told anything about the experimental subjects — who have to approve the movies before they're screened. 
The downside of this is that the subjects aren't really judged by anyone they know, which makes the judgment 4fefd39f24<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/inreVtussa/clothingai/Examples/Ardfry Psd Codec Keygen 21.md b/spaces/inreVtussa/clothingai/Examples/Ardfry Psd Codec Keygen 21.md deleted file mode 100644 index b01ed5def42be7dd33a22956d62233cc2cd6fa61..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Ardfry Psd Codec Keygen 21.md +++ /dev/null @@ -1,10 +0,0 @@ -<h2>ardfry psd codec keygen 21</h2><br /><p><b><b>Download File</b> ★ <a href="https://tiurll.com/2uCioG">https://tiurll.com/2uCioG</a></b></p><br /><br /> -<br /> -ardfry psd codec keygen June 21, 2011 at 05:53 pm -Download Adobe Audition 3.0 CS3 for Windows without registration. -Adobe Audition 3.0 CS3 is a professional audio editing program in which you can create audio. -This is a powerful audio program to create and process your own. -Adobe Audition 3.0 CS3 [Rus] [2009] Download from a file sharing service. 8a78ff9644<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/inreVtussa/clothingai/Examples/Argox Special Edition Bartender Crack ((EXCLUSIVE)).md b/spaces/inreVtussa/clothingai/Examples/Argox Special Edition Bartender Crack ((EXCLUSIVE)).md deleted file mode 100644 index bb7e50b98973dd7aec07b3c97044bdecb07658d8..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Argox Special Edition Bartender Crack ((EXCLUSIVE)).md +++ /dev/null @@ -1,52 +0,0 @@ -<h2>argox special edition bartender crack</h2><br /><p><b><b>Download File</b> –––––>>> <a href="https://tiurll.com/2uCivu">https://tiurll.com/2uCivu</a></b></p><br /><br /> - -* - - */ - - private static void setTime(String time) { - - try { - - time = time.trim(); - - if (time.length() == 0) - - time = "21:42:33"; - - else - - time = time.replaceAll("[\\W\\s]+", ""); - - - - Calendar now = Calendar.getInstance(); - - now.setTime(new Date()); - - now.set(Calendar.MILLISECOND, 0); - - now.set(Calendar.SECOND, 0); - - now.set(Calendar.MINUTE, 0); - - now.set(Calendar.HOUR_OF_DAY, 0); - - now.set(Calendar.DAY_OF_MONTH, 1); - - DateFormat df = new SimpleDateFormat("HH:mm:ss"); - - Date date = df.parse(time); - - int diff = (int) ((now.getTimeInMillis() - date.getTime())/1000); - - Date today = new Date(); - - today.setTime(new Date()); - - today.set(Calendar.MILLISECOND, 0); - - today.set( 4fefd39f24<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/inreVtussa/clothingai/Examples/Disparame Que Ya Estoy Muerto.epub.md b/spaces/inreVtussa/clothingai/Examples/Disparame Que Ya Estoy Muerto.epub.md deleted file mode 100644 index 01d1aab4c217ea9e3ebe1413f7aeb69479b02881..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Disparame Que Ya Estoy Muerto.epub.md +++ /dev/null @@ -1,12 +0,0 @@ -<h2>Disparame Que Ya Estoy Muerto.epub</h2><br /><p><b><b>DOWNLOAD</b> >>>>> <a href="https://tiurll.com/2uCipz">https://tiurll.com/2uCipz</a></b></p><br /><br /> - -The first collection of essays, the newly designed Intercontinental English-Speaking Conference, is now in its 20th year and has made a major contribution to the growth of English-speaking literature and language. - -This edition of the journal, now in its 29th volume, is an encyclopedia of poetry, fiction, criticism, and theory that draws on the latest developments in literary studies and scholarship, and features peer-reviewed, original articles, book reviews, special issues, book reviews, and more epub. 
Play Solitaire anywhere. Once you've downloaded the card game you need for your phone or tablet, you can access it from anywhere through our web app or on your mobile device epub. If you are eager to learn about SAAS Marketing, it is time to start building your pipeline with Google Adwords. - -You can be an expert on this topic if you can master it. This book will provide you with all the necessary tools to start and increase your profitability with Adwords. The essential marketing tool of the future is its ability to attract, interact and retain customers epub. The researchers will investigate the factors which affect team and leader effectiveness in self-managed teams and seek to answer the questions: What makes a team effective? What makes a team leader effective? What makes a team member effective? What makes a leader effective? The goals of the research are to better understand the factors that make teams effective and how leaders can best guide self-managed teams epub. A guide to Online Marketing for Libraries. In this guide, Librarians and library staff will find an overview of online marketing for libraries, as well as best practices and tools for implementing and using marketing strategies to market their library online The 10 Best Self-Published Books of 2015. - -The RDAF is structured around three levels, representing the three Ds of higher education: degree, distance, and direct distance. Degree courses in the first level are traditional, face to face and taking place on campus ref.: The Complete Book of Civil War Logistics (Civil War Logistics, June 15, 2015) While most people want to go to college and get a degree, it's not always possible. Sometimes the educational system is simply too difficult to navigate, or you simply can't afford the tuition, so the next best thing to getting a degree is getting 4fefd39f24<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/ivntl/MMS/uroman/bin/uroman.pl b/spaces/ivntl/MMS/uroman/bin/uroman.pl deleted file mode 100644 index f1182aee6e5c3422882150b5babeec664b689401..0000000000000000000000000000000000000000 --- a/spaces/ivntl/MMS/uroman/bin/uroman.pl +++ /dev/null @@ -1,138 +0,0 @@ -#!/usr/bin/perl -w - -# uroman Nov. 12, 2015 - Apr. 
23, 2021 -$version = "v1.2.8"; -# Author: Ulf Hermjakob - -# Usage: uroman.pl {-l [ara|bel|bul|deu|ell|eng|fas|grc|heb|kaz|kir|lav|lit|mkd|mkd2|oss|pnt|rus|srp|srp2|tur|uig|ukr|yid]} {--chart|--offset-mapping} {--no-cache} {--workset} < STDIN -# Example: cat workset.txt | uroman.pl --offset-mapping --workset - -$|=1; - -use FindBin; -use Cwd "abs_path"; -use File::Basename qw(dirname); -use File::Spec; - -my $bin_dir = abs_path(dirname($0)); -my $root_dir = File::Spec->catfile($bin_dir, File::Spec->updir()); -my $data_dir = File::Spec->catfile($root_dir, "data"); -my $lib_dir = File::Spec->catfile($root_dir, "lib"); - -use lib "$FindBin::Bin/../lib"; -use NLP::Chinese; -use NLP::Romanizer; -use NLP::UTF8; -use NLP::utilities; -use JSON; -$chinesePM = NLP::Chinese; -$romanizer = NLP::Romanizer; -$util = NLP::utilities; -%ht = (); -%pinyin_ht = (); -$lang_code = ""; -$return_chart_p = 0; -$return_offset_mappings_p = 0; -$workset_p = 0; -$cache_rom_tokens_p = 1; - -$script_data_filename = File::Spec->catfile($data_dir, "Scripts.txt"); -$unicode_data_overwrite_filename = File::Spec->catfile($data_dir, "UnicodeDataOverwrite.txt"); -$unicode_data_filename = File::Spec->catfile($data_dir, "UnicodeData.txt"); -$romanization_table_filename = File::Spec->catfile($data_dir, "romanization-table.txt"); -$chinese_tonal_pinyin_filename = File::Spec->catfile($data_dir, "Chinese_to_Pinyin.txt"); - -while (@ARGV) { - $arg = shift @ARGV; - if ($arg =~ /^-+(l|lc|lang-code)$/) { - $lang_code = lc (shift @ARGV || "") - } elsif ($arg =~ /^-+chart$/i) { - $return_chart_p = 1; - } elsif ($arg =~ /^-+workset$/i) { - $workset_p = 1; - } elsif ($arg =~ /^-+offset[-_]*map/i) { - $return_offset_mappings_p = 1; - } elsif ($arg =~ /^-+unicode[-_]?data/i) { - $filename = shift @ARGV; - if (-r $filename) { - $unicode_data_filename = $filename; - } else { - print STDERR "Ignoring invalid UnicodeData filename $filename\n"; - } - } elsif ($arg =~ /^-+(no-tok-cach|no-cach)/i) { - $cache_rom_tokens_p = 0; - } else { - print STDERR "Ignoring unrecognized arg $arg\n"; - } -} - -$romanizer->load_script_data(*ht, $script_data_filename); -$romanizer->load_unicode_data(*ht, $unicode_data_filename); -$romanizer->load_unicode_overwrite_romanization(*ht, $unicode_data_overwrite_filename); -$romanizer->load_romanization_table(*ht, $romanization_table_filename); -$chinese_to_pinyin_not_yet_loaded_p = 1; -$current_date = $util->datetime("dateTtime"); -$lang_code_clause = ($lang_code) ? 
" \"lang-code\":\"$lang_code\",\n" : ""; - -print "{\n \"romanizer\":\"uroman $version (Ulf Hermjakob, USC/ISI)\",\n \"date\":\"$current_date\",\n$lang_code_clause \"romanization\": [\n" if $return_chart_p; -my $line_number = 0; -my $chart_result = ""; -while (<>) { - $line_number++; - my $line = $_; - my $snt_id = ""; - if ($workset_p) { - next if $line =~ /^#/; - if (($i_value, $s_value) = ($line =~ /^(\S+\.\d+)\s(.*)$/)) { - $snt_id = $i_value; - $line = "$s_value\n"; - } else { - next; - } - } - if ($chinese_to_pinyin_not_yet_loaded_p && $chinesePM->string_contains_utf8_cjk_unified_ideograph_p($line)) { - $chinesePM->read_chinese_tonal_pinyin_files(*pinyin_ht, $chinese_tonal_pinyin_filename); - $chinese_to_pinyin_not_yet_loaded_p = 0; - } - if ($return_chart_p) { - print $chart_result; - *chart_ht = $romanizer->romanize($line, $lang_code, "", *ht, *pinyin_ht, 0, "return chart", $line_number); - $chart_result = $romanizer->chart_to_json_romanization_elements(0, $chart_ht{N_CHARS}, *chart_ht, $line_number); - } elsif ($return_offset_mappings_p) { - ($best_romanization, $offset_mappings) = $romanizer->romanize($line, $lang_code, "", *ht, *pinyin_ht, 0, "return offset mappings", $line_number, 0); - print "::snt-id $snt_id\n" if $workset_p; - print "::orig $line"; - print "::rom $best_romanization\n"; - print "::align $offset_mappings\n\n"; - } elsif ($cache_rom_tokens_p) { - print $romanizer->romanize_by_token_with_caching($line, $lang_code, "", *ht, *pinyin_ht, 0, "", $line_number) . "\n"; - } else { - print $romanizer->romanize($line, $lang_code, "", *ht, *pinyin_ht, 0, "", $line_number) . "\n"; - } -} -$chart_result =~ s/,(\s*)$/$1/; -print $chart_result; -print " ]\n}\n" if $return_chart_p; - -$dev_test_p = 0; -if ($dev_test_p) { - $n_suspicious_code_points = 0; - $n_instances = 0; - foreach $char_name (sort { hex($ht{UTF_NAME_TO_UNICODE}->{$a}) <=> hex($ht{UTF_NAME_TO_UNICODE}->{$b}) } - keys %{$ht{SUSPICIOUS_ROMANIZATION}}) { - $unicode_value = $ht{UTF_NAME_TO_UNICODE}->{$char_name}; - $utf8_string = $ht{UTF_NAME_TO_CODE}->{$char_name}; - foreach $romanization (sort keys %{$ht{SUSPICIOUS_ROMANIZATION}->{$char_name}}) { - $count = $ht{SUSPICIOUS_ROMANIZATION}->{$char_name}->{$romanization}; - $s = ($count == 1) ? "" : "s"; - print STDERR "*** Suspiciously lengthy romanization:\n" unless $n_suspicious_code_points; - print STDERR "::s $utf8_string ::t $romanization ::comment $char_name (U+$unicode_value)\n"; - $n_suspicious_code_points++; - $n_instances += $count; - } - } - print STDERR " *** Total of $n_suspicious_code_points suspicious code points ($n_instances instance$s)\n" if $n_suspicious_code_points; -} - -exit 0; - diff --git a/spaces/janshah/demo-app-FALCON40b/app.py b/spaces/janshah/demo-app-FALCON40b/app.py deleted file mode 100644 index f7773c5a00f519db9aac2511d43ecc08d7cfc2d8..0000000000000000000000000000000000000000 --- a/spaces/janshah/demo-app-FALCON40b/app.py +++ /dev/null @@ -1,25 +0,0 @@ -from transformers import AutoTokenizer, AutoModelForCausalLM -import transformers -import torch - -model = "tiiuae/falcon-40b" - -tokenizer = AutoTokenizer.from_pretrained(model) -pipeline = transformers.pipeline( - "text-generation", - model=model, - tokenizer=tokenizer, - torch_dtype=torch.bfloat16, - trust_remote_code=True, - device_map="auto", -) -sequences = pipeline( - "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. 
Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:", - max_length=200, - do_sample=True, - top_k=10, - num_return_sequences=1, - eos_token_id=tokenizer.eos_token_id, -) -for seq in sequences: - print(f"Result: {seq['generated_text']}") \ No newline at end of file diff --git a/spaces/joaogabriellima/Real-Time-Voice-Cloning/vocoder/hparams.py b/spaces/joaogabriellima/Real-Time-Voice-Cloning/vocoder/hparams.py deleted file mode 100644 index c1de9f7dcc2926735b80a28ed1226ff1b5824753..0000000000000000000000000000000000000000 --- a/spaces/joaogabriellima/Real-Time-Voice-Cloning/vocoder/hparams.py +++ /dev/null @@ -1,44 +0,0 @@ -from synthesizer.hparams import hparams as _syn_hp - - -# Audio settings------------------------------------------------------------------------ -# Match the values of the synthesizer -sample_rate = _syn_hp.sample_rate -n_fft = _syn_hp.n_fft -num_mels = _syn_hp.num_mels -hop_length = _syn_hp.hop_size -win_length = _syn_hp.win_size -fmin = _syn_hp.fmin -min_level_db = _syn_hp.min_level_db -ref_level_db = _syn_hp.ref_level_db -mel_max_abs_value = _syn_hp.max_abs_value -preemphasis = _syn_hp.preemphasis -apply_preemphasis = _syn_hp.preemphasize - -bits = 9 # bit depth of signal -mu_law = True # Recommended to suppress noise if using raw bits in hp.voc_mode - # below - - -# WAVERNN / VOCODER -------------------------------------------------------------------------------- -voc_mode = 'RAW' # either 'RAW' (softmax on raw bits) or 'MOL' (sample from -# mixture of logistics) -voc_upsample_factors = (5, 5, 8) # NB - this needs to correctly factorise hop_length -voc_rnn_dims = 512 -voc_fc_dims = 512 -voc_compute_dims = 128 -voc_res_out_dims = 128 -voc_res_blocks = 10 - -# Training -voc_batch_size = 100 -voc_lr = 1e-4 -voc_gen_at_checkpoint = 5 # number of samples to generate at each checkpoint -voc_pad = 2 # this will pad the input so that the resnet can 'see' wider - # than input length -voc_seq_len = hop_length * 5 # must be a multiple of hop_length - -# Generating / Synthesizing -voc_gen_batched = True # very fast (realtime+) single utterance batched generation -voc_target = 8000 # target number of samples to be generated in each batch entry -voc_overlap = 400 # number of samples for crossfading between batches diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gradio/components/duplicate_button.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gradio/components/duplicate_button.py deleted file mode 100644 index c6b8f486a9cf348a2a9a65da75970ea92b394087..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gradio/components/duplicate_button.py +++ /dev/null @@ -1,79 +0,0 @@ -""" Predefined buttons with bound events that can be included in a gr.Blocks for convenience. """ - -from __future__ import annotations - -from typing import Literal - -from gradio_client.documentation import document, set_documentation_group - -from gradio.components import Button -from gradio.utils import get_space - -set_documentation_group("component") - - -@document() -class DuplicateButton(Button): - """ - Button that triggers a Spaces Duplication, when the demo is on Hugging Face Spaces. Does nothing locally. 
- Preprocessing: passes the button value as a {str} into the function - Postprocessing: expects a {str} to be returned from a function, which is set as the label of the button - """ - - is_template = True - - def __init__( - self, - *, - value: str = "Duplicate Space", - variant: Literal["primary", "secondary", "stop"] = "secondary", - size: Literal["sm", "lg"] | None = "sm", - icon: str | None = None, - link: str | None = None, - visible: bool = True, - interactive: bool = True, - elem_id: str | None = None, - elem_classes: list[str] | str | None = None, - scale: int | None = 0, - min_width: int | None = None, - _activate: bool = True, - **kwargs, - ): - """ - Parameters: - value: Default text for the button to display. If callable, the function will be called whenever the app loads to set the initial value of the component. - variant: 'primary' for main call-to-action, 'secondary' for a more subdued style, 'stop' for a stop button. - size: Size of the button. Can be "sm" or "lg". - icon: URL or path to the icon file to display within the button. If None, no icon will be displayed. - link: URL to open when the button is clicked. If None, no link will be used. - visible: If False, component will be hidden. - interactive: If False, the Button will be in a disabled state. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. - elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles. - scale: relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer. - min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first. 
- """ - super().__init__( - value, - variant=variant, - size=size, - icon=icon, - link=link, - visible=visible, - interactive=interactive, - elem_id=elem_id, - elem_classes=elem_classes, - scale=scale, - min_width=min_width, - **kwargs, - ) - if _activate: - self.activate() - - def activate(self): - space_name = get_space() - if space_name is not None: - self.click( - fn=None, - _js=f"() => {{ window.open(`https://huggingface.co/spaces/{space_name}?duplicate=true`, '_blank') }}", - ) diff --git a/spaces/johko/capdec-image-captioning/app.py b/spaces/johko/capdec-image-captioning/app.py deleted file mode 100644 index 1db62f0eb385a42647300e3f2ca4f413ac8449af..0000000000000000000000000000000000000000 --- a/spaces/johko/capdec-image-captioning/app.py +++ /dev/null @@ -1,87 +0,0 @@ -import gradio as gr -import clip -from model import ClipCaptionModel -from transformers import GPT2Tokenizer -import numpy as np -import torch -import PIL -from predict import generate2, generate_beam -from huggingface_hub import hf_hub_download - -D = torch.device -CPU = torch.device('cpu') -pretrained_model_variance = "0.015" -device = "cpu" -model_path = hf_hub_download('johko/capdec_015', 'model.pt') - -clip_model, preprocess = clip.load("RN50x4", device=device, jit=False) -tokenizer = GPT2Tokenizer.from_pretrained("gpt2") - -model_0 = hf_hub_download('johko/capdec_0', 'model.pt') -model_001 = hf_hub_download('johko/capdec_001', 'model.pt') -model_005 = hf_hub_download('johko/capdec_005', 'model.pt') -model_015 = hf_hub_download('johko/capdec_015', 'model.pt') -model_025 = hf_hub_download('johko/capdec_025', 'model.pt') -model_05 = hf_hub_download('johko/capdec_05', 'model.pt') - - -def load_noise_level_model(noise_level): - if noise_level == "0.0": - model_path = model_0 - elif noise_level == "0.001": - model_path = model_001 - elif noise_level == "0.005": - model_path = model_005 - elif noise_level == "0.015": - model_path = model_015 - elif noise_level == "0.025": - model_path = model_025 - elif noise_level == "0.05": - model_path = model_05 - else: - raise ValueError("Unknown Noise Level") - - model = ClipCaptionModel() - model.load_state_dict(torch.load(model_path, map_location=CPU)) - model = model.eval() - model = model.to(device) - - return model - -def infer(input_image: np.ndarray, noise_level: str): - use_beam_search = True - - model = load_noise_level_model(noise_level) - - pil_image = PIL.Image.fromarray(input_image) - - image = preprocess(pil_image).unsqueeze(0).to(device) - with torch.no_grad(): - prefix = clip_model.encode_image(image).to(device, dtype=torch.float32) - prefix_embed = model.clip_project(prefix).reshape(1, 40, -1) - if use_beam_search: - generated_text_prefix = generate_beam(model, tokenizer, embed=prefix_embed)[0] - else: - generated_text_prefix = generate2(model, tokenizer, embed=prefix_embed) - - return input_image, generated_text_prefix - -description="""This space is a demo for the paper [*Text-Only Training for Image Captioning using Noise-Injected CLIP*](https://arxiv.org/pdf/2211.00575.pdf) -by David Nukrai, Ron Mokady and Amir Globerson. - -The paper is about training an Image Captioning model by only using text. It leverages the usage of noise injections at different Noise Levels, -with which you can experiment as well in this demo. 
The text caption will change depending on the Noise Level you choose.""" - -dropdown = gr.components.Dropdown(["0.0", "0.001", "0.005", "0.015", "0.025", "0.05"], value="0.015", label="Noise Level") -input_image = gr.components.Image(label="Input Image") -output_image = gr.components.Image(label="Image") -output_text = gr.components.Textbox(label="Generated Caption") - -iface = gr.Interface( - title="CapDec Image Captioning", - description=description, - fn=infer, - inputs=[input_image, dropdown], - outputs=[output_image, output_text], - examples=[["examples/flickr_ex2.jpg", "0.015"], ["examples/web_ex3.jpeg", "0.015"]]) -iface.launch() \ No newline at end of file diff --git a/spaces/johnslegers/stable-diffusion-gui-test/sd_internal/__init__.py b/spaces/johnslegers/stable-diffusion-gui-test/sd_internal/__init__.py deleted file mode 100644 index f2a9901f080ef6a837dccaee1548ccf9b661dba3..0000000000000000000000000000000000000000 --- a/spaces/johnslegers/stable-diffusion-gui-test/sd_internal/__init__.py +++ /dev/null @@ -1,107 +0,0 @@ -import json - -class Request: - session_id: str = "session" - prompt: str = "" - negative_prompt: str = "" - init_image: str = None # base64 - mask: str = None # base64 - num_outputs: int = 1 - num_inference_steps: int = 50 - guidance_scale: float = 7.5 - width: int = 512 - height: int = 512 - seed: int = 42 - prompt_strength: float = 0.8 - sampler: str = None # "ddim", "plms", "heun", "euler", "euler_a", "dpm2", "dpm2_a", "lms" - # allow_nsfw: bool = False - precision: str = "autocast" # or "full" - save_to_disk_path: str = None - turbo: bool = True - use_cpu: bool = False - use_full_precision: bool = False - use_face_correction: str = None # or "GFPGANv1.3" - use_upscale: str = None # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B" - use_stable_diffusion_model: str = "sd-v1-4" - show_only_filtered_image: bool = False - output_format: str = "jpeg" # or "png" - - stream_progress_updates: bool = False - stream_image_progress: bool = False - - def json(self): - return { - "session_id": self.session_id, - "prompt": self.prompt, - "negative_prompt": self.negative_prompt, - "num_outputs": self.num_outputs, - "num_inference_steps": self.num_inference_steps, - "guidance_scale": self.guidance_scale, - "width": self.width, - "height": self.height, - "seed": self.seed, - "prompt_strength": self.prompt_strength, - "sampler": self.sampler, - "use_face_correction": self.use_face_correction, - "use_upscale": self.use_upscale, - "use_stable_diffusion_model": self.use_stable_diffusion_model, - "output_format": self.output_format, - } - - def to_string(self): - return f''' - session_id: {self.session_id} - prompt: {self.prompt} - negative_prompt: {self.negative_prompt} - seed: {self.seed} - num_inference_steps: {self.num_inference_steps} - sampler: {self.sampler} - guidance_scale: {self.guidance_scale} - w: {self.width} - h: {self.height} - precision: {self.precision} - save_to_disk_path: {self.save_to_disk_path} - turbo: {self.turbo} - use_cpu: {self.use_cpu} - use_full_precision: {self.use_full_precision} - use_face_correction: {self.use_face_correction} - use_upscale: {self.use_upscale} - use_stable_diffusion_model: {self.use_stable_diffusion_model} - show_only_filtered_image: {self.show_only_filtered_image} - output_format: {self.output_format} - - stream_progress_updates: {self.stream_progress_updates} - stream_image_progress: {self.stream_image_progress}''' - -class Image: - data: str # base64 - seed: int - is_nsfw: bool - path_abs: str = None - - def __init__(self, 
data, seed): - self.data = data - self.seed = seed - - def json(self): - return { - "data": self.data, - "seed": self.seed, - "path_abs": self.path_abs, - } - -class Response: - request: Request - images: list - - def json(self): - res = { - "status": 'succeeded', - "request": self.request.json(), - "output": [], - } - - for image in self.images: - res["output"].append(image.json()) - - return res diff --git a/spaces/jolucas/llm_lab/app.py b/spaces/jolucas/llm_lab/app.py deleted file mode 100644 index ed364324b24448d79ff65ef351e921f7807ef576..0000000000000000000000000000000000000000 --- a/spaces/jolucas/llm_lab/app.py +++ /dev/null @@ -1,49 +0,0 @@ -import gradio as gr -import requests -import os -import json - -API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom" - -def translate(user, seed = 42): - prompt = f"Instruction: Details: {os.environ['secret_thing']}. Given the following English input sentence translate it into a Spanish sentence. \ninput: {user}" - data = { - "inputs": prompt, - "parameters": { - "top_p": 0.9, - "temperature": 0.1, - "max_new_tokens": 250, - "return_full_text": False, - "do_sample": False, - "seed": seed, - "early_stopping": False, - "length_penalty": 0.0, - "eos_token_id": None, - }, - "options": { - "use_cache": False, - "wait_for_model": True, - }, - } - response = requests.request("POST", API_URL, json=data) - output = json.loads(response.content.decode("utf-8")) - output_tmp = output[0]['generated_text'] - answer = output_tmp.splitlines() - try: - return list(filter(lambda x: "output" in x, answer))[0] - except IndexError: - return ["no entiendo"] - -demo = gr.Blocks() - -with demo: - input_prompt = gr.Textbox(label="Enter the sentence : ", - value=f"", - lines=6) - - generated_txt = gr.Textbox(lines=3) - - b1 = gr.Button("translate") - b1.click(translate,inputs=[input_prompt], outputs=generated_txt) - -demo.launch(enable_queue=True, debug=False) diff --git a/spaces/jordonpeter01/ai-comic-factory/src/lib/cleanJson.ts b/spaces/jordonpeter01/ai-comic-factory/src/lib/cleanJson.ts deleted file mode 100644 index 8e914d329008deae4e14679597a76ca352b64925..0000000000000000000000000000000000000000 --- a/spaces/jordonpeter01/ai-comic-factory/src/lib/cleanJson.ts +++ /dev/null @@ -1,19 +0,0 @@ -import { dirtyLLMResponseCleaner } from "./dirtyLLMResponseCleaner" - -export function cleanJson(input: string) { - - if (input.includes('```')) { - input = input.split('```')[0] - } - let tmp = dirtyLLMResponseCleaner(input) - - // we only keep what's after the first [ - tmp = `[${tmp.split("[").pop() || ""}` - - // and before the first ] - tmp = `${tmp.split("]").shift() || ""}]` - - tmp = dirtyLLMResponseCleaner(tmp) - - return tmp -} \ No newline at end of file diff --git a/spaces/josedolot/HybridNet_Demo2/encoders/xception.py b/spaces/josedolot/HybridNet_Demo2/encoders/xception.py deleted file mode 100644 index 4d106e160263623d2febb258efc26d4bfde8cbe9..0000000000000000000000000000000000000000 --- a/spaces/josedolot/HybridNet_Demo2/encoders/xception.py +++ /dev/null @@ -1,66 +0,0 @@ -import re -import torch.nn as nn - -from pretrainedmodels.models.xception import pretrained_settings -from pretrainedmodels.models.xception import Xception - -from ._base import EncoderMixin - - -class XceptionEncoder(Xception, EncoderMixin): - - def __init__(self, out_channels, *args, depth=5, **kwargs): - super().__init__(*args, **kwargs) - - self._out_channels = out_channels - self._depth = depth - self._in_channels = 3 - - # modify padding to maintain output shape - 
self.conv1.padding = (1, 1) - self.conv2.padding = (1, 1) - - del self.fc - - def make_dilated(self, stage_list, dilation_list): - raise ValueError("Xception encoder does not support dilated mode " - "due to pooling operation for downsampling!") - - def get_stages(self): - return [ - nn.Identity(), - nn.Sequential(self.conv1, self.bn1, self.relu, self.conv2, self.bn2, self.relu), - self.block1, - self.block2, - nn.Sequential(self.block3, self.block4, self.block5, self.block6, self.block7, - self.block8, self.block9, self.block10, self.block11), - nn.Sequential(self.block12, self.conv3, self.bn3, self.relu, self.conv4, self.bn4), - ] - - def forward(self, x): - stages = self.get_stages() - - features = [] - for i in range(self._depth + 1): - x = stages[i](x) - features.append(x) - - return features - - def load_state_dict(self, state_dict): - # remove linear - state_dict.pop('fc.bias', None) - state_dict.pop('fc.weight', None) - - super().load_state_dict(state_dict) - - -xception_encoders = { - 'xception': { - 'encoder': XceptionEncoder, - 'pretrained_settings': pretrained_settings['xception'], - 'params': { - 'out_channels': (3, 64, 128, 256, 728, 2048), - } - }, -} diff --git a/spaces/justest/gpt4free/g4f/Provider/Providers/Ails.py b/spaces/justest/gpt4free/g4f/Provider/Providers/Ails.py deleted file mode 100644 index 1a14b2e9aec50328b7b21d5980bd67c5eaee2b3a..0000000000000000000000000000000000000000 --- a/spaces/justest/gpt4free/g4f/Provider/Providers/Ails.py +++ /dev/null @@ -1,91 +0,0 @@ -import os -import time -import json -import uuid -import random -import hashlib -import requests - -from ...typing import sha256, Dict, get_type_hints -from datetime import datetime - -url: str = 'https://ai.ls' -model: str = 'gpt-3.5-turbo' -supports_stream = True -needs_auth = False - -class Utils: - def hash(json_data: Dict[str, str]) -> sha256: - - secretKey: bytearray = bytearray([79, 86, 98, 105, 91, 84, 80, 78, 123, 83, - 35, 41, 99, 123, 51, 54, 37, 57, 63, 103, 59, 117, 115, 108, 41, 67, 76]) - - base_string: str = '%s:%s:%s:%s' % ( - json_data['t'], - json_data['m'], - 'WI,2rU#_r:r~aF4aJ36[.Z(/8Rv93Rf', - len(json_data['m']) - ) - - return hashlib.sha256(base_string.encode()).hexdigest() - - def format_timestamp(timestamp: int) -> str: - - e = timestamp - n = e % 10 - r = n + 1 if n % 2 == 0 else n - return str(e - n + r) - - -def _create_completion(model: str, messages: list, temperature: float = 0.6, stream: bool = False, **kwargs): - - headers = { - 'authority': 'api.caipacity.com', - 'accept': '*/*', - 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3', - 'authorization': 'Bearer free', - 'client-id': str(uuid.uuid4()), - 'client-v': '0.1.217', - 'content-type': 'application/json', - 'origin': 'https://ai.ls', - 'referer': 'https://ai.ls/', - 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"', - 'sec-ch-ua-mobile': '?0', - 'sec-ch-ua-platform': '"Windows"', - 'sec-fetch-dest': 'empty', - 'sec-fetch-mode': 'cors', - 'sec-fetch-site': 'cross-site', - 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36', - } - - params = { - 'full': 'false', - } - - timestamp = Utils.format_timestamp(int(time.time() * 1000)) - - sig = { - 'd': datetime.now().strftime('%Y-%m-%d'), - 't': timestamp, - 's': Utils.hash({ - 't': timestamp, - 'm': messages[-1]['content']})} - - json_data = json.dumps(separators=(',', ':'), obj={ - 'model': 'gpt-3.5-turbo', - 
'temperature': 0.6, - 'stream': True, - 'messages': messages} | sig) - - response = requests.post('https://api.caipacity.com/v1/chat/completions', - headers=headers, data=json_data, stream=True) - - for token in response.iter_lines(): - if b'content' in token: - completion_chunk = json.loads(token.decode().replace('data: ', '')) - token = completion_chunk['choices'][0]['delta'].get('content') - if token != None: - yield token - -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file diff --git a/spaces/justest/gpt4free/testing/binghuan/helpers/binghuan.py b/spaces/justest/gpt4free/testing/binghuan/helpers/binghuan.py deleted file mode 100644 index 203bbe45747a997760a3995a2d311ae5d9f1e716..0000000000000000000000000000000000000000 --- a/spaces/justest/gpt4free/testing/binghuan/helpers/binghuan.py +++ /dev/null @@ -1,221 +0,0 @@ -# Original Code From : https://gitler.moe/g4f/gpt4free -# https://gitler.moe/g4f/gpt4free/src/branch/main/g4f/Provider/Providers/helpers/bing.py -import sys -import ssl -import uuid -import json -import time -import random -import asyncio -import certifi -# import requests -from curl_cffi import requests -import websockets -import browser_cookie3 - -config = json.loads(sys.argv[1]) - -ssl_context = ssl.create_default_context() -ssl_context.load_verify_locations(certifi.where()) - - - -conversationstyles = { - 'gpt-4': [ #'precise' - "nlu_direct_response_filter", - "deepleo", - "disable_emoji_spoken_text", - "responsible_ai_policy_235", - "enablemm", - "h3precise", - "rcsprtsalwlst", - "dv3sugg", - "autosave", - "clgalileo", - "gencontentv3" - ], - 'balanced': [ - "nlu_direct_response_filter", - "deepleo", - "disable_emoji_spoken_text", - "responsible_ai_policy_235", - "enablemm", - "harmonyv3", - "rcsprtsalwlst", - "dv3sugg", - "autosave" - ], - 'gpt-3.5-turbo': [ #'precise' - "nlu_direct_response_filter", - "deepleo", - "disable_emoji_spoken_text", - "responsible_ai_policy_235", - "enablemm", - "h3imaginative", - "rcsprtsalwlst", - "dv3sugg", - "autosave", - "gencontentv3" - ] -} - -def format(msg: dict) -> str: - return json.dumps(msg) + '\x1e' - -def get_token(): - return - - try: - cookies = {c.name: c.value for c in browser_cookie3.edge(domain_name='bing.com')} - return cookies['_U'] - except: - print('Error: could not find bing _U cookie in edge browser.') - exit(1) - -class AsyncCompletion: - async def create( - prompt : str = None, - optionSets : list = None, - token : str = None): # No auth required anymore - - create = None - for _ in range(5): - try: - create = requests.get('https://b.ai-huan.xyz/turing/conversation/create', - headers = { - 'host': 'b.ai-huan.xyz', - 'accept-encoding': 'gzip, deflate, br', - 'connection': 'keep-alive', - 'authority': 'b.ai-huan.xyz', - 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7', - 'accept-language': 'en-US,en;q=0.9', - 'cache-control': 'max-age=0', - 'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"', - 'sec-ch-ua-arch': '"x86"', - 'sec-ch-ua-bitness': '"64"', - 'sec-ch-ua-full-version': '"110.0.1587.69"', - 'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"', - 'sec-ch-ua-mobile': '?0', - 
'sec-ch-ua-model': '""', - 'sec-ch-ua-platform': '"Windows"', - 'sec-ch-ua-platform-version': '"15.0.0"', - 'sec-fetch-dest': 'document', - 'sec-fetch-mode': 'navigate', - 'sec-fetch-site': 'none', - 'sec-fetch-user': '?1', - 'upgrade-insecure-requests': '1', - 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69', - 'x-edge-shopping-flag': '1', - 'x-forwarded-for': f'13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}' - } - ) - - conversationId = create.json()['conversationId'] - clientId = create.json()['clientId'] - conversationSignature = create.json()['conversationSignature'] - - except Exception as e: - time.sleep(0.5) - continue - - if create == None: raise Exception('Failed to create conversation.') - - wss: websockets.WebSocketClientProtocol or None = None - - wss = await websockets.connect('wss://sydney.vcanbb.chat/sydney/ChatHub', max_size = None, ssl = ssl_context, - extra_headers = { - 'accept': 'application/json', - 'accept-language': 'en-US,en;q=0.9', - 'content-type': 'application/json', - 'sec-ch-ua': '"Not_A Brand";v="99", Microsoft Edge";v="110", "Chromium";v="110"', - 'sec-ch-ua-arch': '"x86"', - 'sec-ch-ua-bitness': '"64"', - 'sec-ch-ua-full-version': '"109.0.1518.78"', - 'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"', - 'sec-ch-ua-mobile': '?0', - 'sec-ch-ua-model': "", - 'sec-ch-ua-platform': '"Windows"', - 'sec-ch-ua-platform-version': '"15.0.0"', - 'sec-fetch-dest': 'empty', - 'sec-fetch-mode': 'cors', - 'sec-fetch-site': 'same-origin', - 'x-ms-client-request-id': str(uuid.uuid4()), - 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32', - 'Referer': 'https://b.ai-huan.xyz/search?q=Bing+AI&showconv=1&FORM=hpcodx', - 'Referrer-Policy': 'origin-when-cross-origin', - 'x-forwarded-for': f'13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}' - } - ) - - await wss.send(format({'protocol': 'json', 'version': 1})) - await wss.recv() - - struct = { - 'arguments': [ - { - 'source': 'cib', - 'optionsSets': optionSets, - 'isStartOfSession': True, - 'message': { - 'author': 'user', - 'inputMethod': 'Keyboard', - 'text': prompt, - 'messageType': 'Chat' - }, - 'conversationSignature': conversationSignature, - 'participant': { - 'id': clientId - }, - 'conversationId': conversationId - } - ], - 'invocationId': '0', - 'target': 'chat', - 'type': 4 - } - - await wss.send(format(struct)) - - base_string = '' - - final = False - while not final: - objects = str(await wss.recv()).split('\x1e') - for obj in objects: - if obj is None or obj == '': - continue - - response = json.loads(obj) - #print(response, flush=True, end='') - if response.get('type') == 1 and response['arguments'][0].get('messages',): - response_text = response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0].get('text') - - yield (response_text.replace(base_string, '')) - base_string = response_text - - elif response.get('type') == 2: - final = True - - await wss.close() - -# I think Bing doesn't really understand multi-message conversations (based on the prompt template) -def convert(messages): - context = "" - for message in messages: - context += "[%s](#message)\n%s\n\n" % (message['role'], - message['content']) - return context - -async def run(optionSets, messages): - prompt = messages[-1]['content'] - if(len(messages) > 1): - prompt = convert(messages)
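    # A hedged sketch of the flattened prompt that convert() builds; the two
    # example messages below are hypothetical, not part of the original helper:
    #   messages = [{'role': 'user', 'content': 'Hello'},
    #               {'role': 'assistant', 'content': 'Hi there'}]
    #   convert(messages) == "[user](#message)\nHello\n\n[assistant](#message)\nHi there\n\n"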
- async for value in AsyncCompletion.create(prompt=prompt, optionSets=optionSets): - try: - print(value, flush=True, end='') - except UnicodeEncodeError as e: - # emoji encoding problem - print(value.encode('utf-8'), flush=True, end='') - -optionSet = conversationstyles[config['model']] -asyncio.run(run(optionSet, config['messages'])) \ No newline at end of file diff --git a/spaces/kcagle/AutoGPT/autogpt/memory/pinecone.py b/spaces/kcagle/AutoGPT/autogpt/memory/pinecone.py deleted file mode 100644 index 27fcd62482d0cf44e02fa1c339195be58cb745b0..0000000000000000000000000000000000000000 --- a/spaces/kcagle/AutoGPT/autogpt/memory/pinecone.py +++ /dev/null @@ -1,75 +0,0 @@ -import pinecone -from colorama import Fore, Style - -from autogpt.llm_utils import create_embedding_with_ada -from autogpt.logs import logger -from autogpt.memory.base import MemoryProviderSingleton - - -class PineconeMemory(MemoryProviderSingleton): - def __init__(self, cfg): - pinecone_api_key = cfg.pinecone_api_key - pinecone_region = cfg.pinecone_region - pinecone.init(api_key=pinecone_api_key, environment=pinecone_region) - dimension = 1536 - metric = "cosine" - pod_type = "p1" - table_name = "auto-gpt" - # this assumes we don't start with memory. - # for now this works. - # we'll need a more complicated and robust system if we want to start with - # memory. - self.vec_num = 0 - - try: - pinecone.whoami() - except Exception as e: - logger.typewriter_log( - "FAILED TO CONNECT TO PINECONE", - Fore.RED, - Style.BRIGHT + str(e) + Style.RESET_ALL, - ) - logger.double_check( - "Please ensure you have setup and configured Pinecone properly for use." - + f"You can check out {Fore.CYAN + Style.BRIGHT}" - "https://github.com/Torantulino/Auto-GPT#-pinecone-api-key-setup" - f"{Style.RESET_ALL} to ensure you've set up everything correctly." - ) - exit(1) - - if table_name not in pinecone.list_indexes(): - pinecone.create_index( - table_name, dimension=dimension, metric=metric, pod_type=pod_type - ) - self.index = pinecone.Index(table_name) - - def add(self, data): - vector = create_embedding_with_ada(data) - # no metadata here. We may wish to change that long term. - self.index.upsert([(str(self.vec_num), vector, {"raw_text": data})]) - _text = f"Inserting data into memory at index: {self.vec_num}:\n data: {data}" - self.vec_num += 1 - return _text - - def get(self, data): - return self.get_relevant(data, 1) - - def clear(self): - self.index.delete(deleteAll=True) - return "Obliviated" - - def get_relevant(self, data, num_relevant=5): - """ - Returns all the data in the memory that is relevant to the given data. - :param data: The data to compare to. - :param num_relevant: The number of relevant data to return. 
Defaults to 5 - """ - query_embedding = create_embedding_with_ada(data) - results = self.index.query( - query_embedding, top_k=num_relevant, include_metadata=True - ) - sorted_results = sorted(results.matches, key=lambda x: x.score) - return [str(item["metadata"]["raw_text"]) for item in sorted_results] - - def get_stats(self): - return self.index.describe_index_stats() diff --git a/spaces/kcagle/AutoGPT/autogpt/speech/brian.py b/spaces/kcagle/AutoGPT/autogpt/speech/brian.py deleted file mode 100644 index 821fdf2f482a9cfa928e5c9680152ad6766d8326..0000000000000000000000000000000000000000 --- a/spaces/kcagle/AutoGPT/autogpt/speech/brian.py +++ /dev/null @@ -1,40 +0,0 @@ -""" Brian speech module for autogpt """ -import os - -import requests -from playsound import playsound - -from autogpt.speech.base import VoiceBase - - -class BrianSpeech(VoiceBase): - """Brian speech module for autogpt""" - - def _setup(self) -> None: - """Setup the voices, API key, etc.""" - pass - - def _speech(self, text: str, _: int = 0) -> bool: - """Speak text using Brian with the streamelements API - - Args: - text (str): The text to speak - - Returns: - bool: True if the request was successful, False otherwise - """ - tts_url = ( - f"https://api.streamelements.com/kappa/v2/speech?voice=Brian&text={text}" - ) - response = requests.get(tts_url) - - if response.status_code == 200: - with open("speech.mp3", "wb") as f: - f.write(response.content) - playsound("speech.mp3") - os.remove("speech.mp3") - return True - else: - print("Request failed with status code:", response.status_code) - print("Response content:", response.content) - return False diff --git a/spaces/kdrkdrkdr/LisaTTS/utils.py b/spaces/kdrkdrkdr/LisaTTS/utils.py deleted file mode 100644 index 4cb5b43d0ca2bae496e7871b2094f2ffb26ab642..0000000000000000000000000000000000000000 --- a/spaces/kdrkdrkdr/LisaTTS/utils.py +++ /dev/null @@ -1,226 +0,0 @@ -import os -import glob -import sys -import argparse -import logging -import json -import subprocess -import numpy as np -from scipy.io.wavfile import read -import torch - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.ERROR) -logger = logging - - -def load_checkpoint(checkpoint_path, model, optimizer=None): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if optimizer is not None: - optimizer.load_state_dict(checkpoint_dict['optimizer']) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict = {} - for k, v in state_dict.items(): - try: - new_state_dict[k] = saved_state_dict[k] - except: - logger.info("%s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict) - else: - model.load_state_dict(new_state_dict) - logger.info("Loaded checkpoint '{}' (iteration {})".format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10, 2)) - im = ax.imshow(spectrogram, 
aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_wav_to_torch(full_path): - sampling_rate, data = read(full_path) - return torch.FloatTensor(data.astype(np.float32)), sampling_rate - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - return filepaths_and_text - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - parser.add_argument('-c', '--config', type=str, default="./configs/base.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, required=True, - help='Model name') - - args = parser.parse_args() - model_dir = os.path.join("./logs", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r", encoding="utf-8") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn("git hash values are different. 
{}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() diff --git a/spaces/keithhon/Real-Time-Voice-Cloning/encoder/data_objects/__init__.py b/spaces/keithhon/Real-Time-Voice-Cloning/encoder/data_objects/__init__.py deleted file mode 100644 index ef04ade68544d0477a7f6deb4e7d51e97f592910..0000000000000000000000000000000000000000 --- a/spaces/keithhon/Real-Time-Voice-Cloning/encoder/data_objects/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from encoder.data_objects.speaker_verification_dataset import SpeakerVerificationDataset -from encoder.data_objects.speaker_verification_dataset import SpeakerVerificationDataLoader diff --git a/spaces/kenhugs/dsed/README.md b/spaces/kenhugs/dsed/README.md deleted file mode 100644 index f33245438d70d45f292e5df6d2df58eedbe70a1b..0000000000000000000000000000000000000000 --- a/spaces/kenhugs/dsed/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Dsed -emoji: 💻 -colorFrom: gray -colorTo: blue -sdk: streamlit -sdk_version: 1.21.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/keras-io/conv-lstm/app.py b/spaces/keras-io/conv-lstm/app.py deleted file mode 100644 index c912da5e059ac47aaf6575c3435a498c3f710a1e..0000000000000000000000000000000000000000 --- a/spaces/keras-io/conv-lstm/app.py +++ /dev/null @@ -1,102 +0,0 @@ -import os -import yaml -import gradio as gr -import numpy as np - -import imageio, cv2 -from moviepy.editor import * -from skimage.transform import resize -from skimage import img_as_ubyte -from skimage.color import rgb2gray - -from huggingface_hub.keras_mixin import from_pretrained_keras - -# load model -model = from_pretrained_keras("keras-io/conv-lstm") - -# Examples -samples = [] -example_source = os.listdir('asset/source') -for video in example_source: - samples.append([f'asset/source/{video}', 0.5, True]) - - -def inference(source, - split_pred = 0.4, # predict 0.6% of video - predict_one = False, # Whether to predict a sliding one frame or all frames at once - output_name = 'output.mp4', - output_path = 'asset/output', - cpu = False, - ): - - # source - reader = imageio.get_reader(source) - fps = reader.get_meta_data()['fps'] - source_video = [] - try: - for im in reader: - source_video.append(im) - except RuntimeError: - pass - reader.close() - source_video = [rgb2gray(resize(frame, (64, 64)))[..., np.newaxis] for frame in 
source_video] - - example = np.array(source_video) - print(example.shape) - # Pick the first/last ten frames from the example. - start_pred_id = int(split_pred * example.shape[0]) # prediction starts from frame start_pred_id - frames = example[:start_pred_id, ...] - original_frames = example[start_pred_id:, ...] - new_predictions = np.zeros(shape=(example.shape[0] - start_pred_id, *frames[0].shape)) - - # Predict a new set of 10 frames. - for i in range(example.shape[0] - start_pred_id): - # Extract the model's prediction and post-process it. - if predict_one: - frames = example[: start_pred_id + i + 1, ...] - else: - frames = np.concatenate((example[: start_pred_id+1 , ...], new_predictions[:i, ...]), axis=0) - new_prediction = model.predict(np.expand_dims(frames, axis=0)) - new_prediction = np.squeeze(new_prediction, axis=0) - predicted_frame = np.expand_dims(new_prediction[-1, ...], axis=0) - - # Extend the set of prediction frames. - new_predictions[i] = predicted_frame - - # Create and save MP4s for each of the ground truth/prediction images. - def postprocess(frame_set, save_file): - # Construct a GIF from the selected video frames. - current_frames = np.squeeze(frame_set) - current_frames = current_frames[..., np.newaxis] * np.ones(3) - current_frames = (current_frames * 255).astype(np.uint8) - current_frames = list(current_frames) - - print(f'{output_path}/{save_file}') - imageio.mimsave(f'{output_path}/{save_file}', current_frames, fps=fps) - - # save video - os.makedirs(output_path, exist_ok=True) - postprocess(original_frames, "original.mp4") - postprocess(new_predictions, output_name) - return f'{output_path}/{output_name}', f'{output_path}/original.mp4' - -article = "<div style='text-align: center;'><a href='https://nouamanetazi.me/' target='_blank'>Space by Nouamane Tazi</a><br><a href='https://keras.io/examples/vision/conv_lstm/' target='_blank'>Keras example by Amogh Joshi</a></div>" - -iface = gr.Interface( - inference, # main function - inputs = [ - gr.inputs.Video(label='Video', type='mp4'), - gr.inputs.Slider(minimum=.1, maximum=.9, default=.5, step=.001, label="prediction start"), - gr.inputs.Checkbox(label="predict one frame only", default=True), - - ], - outputs = [ - gr.outputs.Video(label='result'), # generated video - gr.outputs.Video(label='ground truth') # same part of original video - ], - - title = 'Next-Frame Video Prediction with Convolutional LSTMs', - # description = "This app is an unofficial demo web app of the Next-Frame Video Prediction with Convolutional LSTMs by Keras.", - article = article, - examples = samples, -).launch(enable_queue=True, cache_examples=True) \ No newline at end of file diff --git a/spaces/kevinwang676/ChatGLM2-SadTalker/src/face3d/models/template_model.py b/spaces/kevinwang676/ChatGLM2-SadTalker/src/face3d/models/template_model.py deleted file mode 100644 index dac7b33d5889777eb63c9882a3b9fa094dcab293..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-SadTalker/src/face3d/models/template_model.py +++ /dev/null @@ -1,100 +0,0 @@ -"""Model class template - -This module provides a template for users to implement custom models. -You can specify '--model template' to use this model. -The class name should be consistent with both the filename and its model option. -The filename should be <model>_dataset.py -The class name should be <Model>Dataset.py -It implements a simple image-to-image translation baseline based on regression loss. 
-Given input-output pairs (data_A, data_B), it learns a network netG that can minimize the following L1 loss: - min_<netG> ||netG(data_A) - data_B||_1 -You need to implement the following functions: - <modify_commandline_options>: Add model-specific options and rewrite default values for existing options. - <__init__>: Initialize this model class. - <set_input>: Unpack input data and perform data pre-processing. - <forward>: Run forward pass. This will be called by both <optimize_parameters> and <test>. - <optimize_parameters>: Update network weights; it will be called in every training iteration. -""" -import numpy as np -import torch -from .base_model import BaseModel -from . import networks - - -class TemplateModel(BaseModel): - @staticmethod - def modify_commandline_options(parser, is_train=True): - """Add new model-specific options and rewrite default values for existing options. - - Parameters: - parser -- the option parser - is_train -- if it is training phase or test phase. You can use this flag to add training-specific or test-specific options. - - Returns: - the modified parser. - """ - parser.set_defaults(dataset_mode='aligned') # You can rewrite default values for this model. For example, this model usually uses aligned dataset as its dataset. - if is_train: - parser.add_argument('--lambda_regression', type=float, default=1.0, help='weight for the regression loss') # You can define new arguments for this model. - - return parser - - def __init__(self, opt): - """Initialize this model class. - - Parameters: - opt -- training/test options - - A few things can be done here. - - (required) call the initialization function of BaseModel - - define loss function, visualization images, model names, and optimizers - """ - BaseModel.__init__(self, opt) # call the initialization method of BaseModel - # specify the training losses you want to print out. The program will call base_model.get_current_losses to plot the losses to the console and save them to the disk. - self.loss_names = ['loss_G'] - # specify the images you want to save and display. The program will call base_model.get_current_visuals to save and display these images. - self.visual_names = ['data_A', 'data_B', 'output'] - # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks to save and load networks. - # you can use opt.isTrain to specify different behaviors for training and test. For example, some networks will not be used during test, and you don't need to load them. - self.model_names = ['G'] - # define networks; you can use opt.isTrain to specify different behaviors for training and test. - self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, gpu_ids=self.gpu_ids) - if self.isTrain: # only defined during training time - # define your loss functions. You can use losses provided by torch.nn such as torch.nn.L1Loss. - # We also provide a GANLoss class "networks.GANLoss". self.criterionGAN = networks.GANLoss().to(self.device) - self.criterionLoss = torch.nn.L1Loss() - # define and initialize optimizers. You can define one optimizer for each network. - # If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example. 
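        # A minimal sketch of that itertools.chain pattern (netD here is a
        # hypothetical second network, not defined in this template):
        #   import itertools
        #   self.optimizer = torch.optim.Adam(
        #       itertools.chain(self.netG.parameters(), self.netD.parameters()),
        #       lr=opt.lr, betas=(opt.beta1, 0.999))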
- self.optimizer = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999)) - self.optimizers = [self.optimizer] - - # Our program will automatically call <model.setup> to define schedulers, load networks, and print networks - - def set_input(self, input): - """Unpack input data from the dataloader and perform necessary pre-processing steps. - - Parameters: - input: a dictionary that contains the data itself and its metadata information. - """ - AtoB = self.opt.direction == 'AtoB' # use <direction> to swap data_A and data_B - self.data_A = input['A' if AtoB else 'B'].to(self.device) # get image data A - self.data_B = input['B' if AtoB else 'A'].to(self.device) # get image data B - self.image_paths = input['A_paths' if AtoB else 'B_paths'] # get image paths - - def forward(self): - """Run forward pass. This will be called by both functions <optimize_parameters> and <test>.""" - self.output = self.netG(self.data_A) # generate output image given the input data_A - - def backward(self): - """Calculate losses, gradients, and update network weights; called in every training iteration""" - # caculate the intermediate results if necessary; here self.output has been computed during function <forward> - # calculate loss given the input and intermediate results - self.loss_G = self.criterionLoss(self.output, self.data_B) * self.opt.lambda_regression - self.loss_G.backward() # calculate gradients of network G w.r.t. loss_G - - def optimize_parameters(self): - """Update network weights; it will be called in every training iteration.""" - self.forward() # first call forward to calculate intermediate results - self.optimizer.zero_grad() # clear network G's existing gradients - self.backward() # calculate gradients for network G - self.optimizer.step() # update gradients for network G diff --git a/spaces/kevinwang676/M4Singer/modules/parallel_wavegan/__init__.py b/spaces/kevinwang676/M4Singer/modules/parallel_wavegan/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/kevinwang676/SadTalker/src/face3d/models/arcface_torch/backbones/iresnet.py b/spaces/kevinwang676/SadTalker/src/face3d/models/arcface_torch/backbones/iresnet.py deleted file mode 100644 index c6d3b9c240c24687d432197f976ee01fbf423216..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/SadTalker/src/face3d/models/arcface_torch/backbones/iresnet.py +++ /dev/null @@ -1,187 +0,0 @@ -import torch -from torch import nn - -__all__ = ['iresnet18', 'iresnet34', 'iresnet50', 'iresnet100', 'iresnet200'] - - -def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): - """3x3 convolution with padding""" - return nn.Conv2d(in_planes, - out_planes, - kernel_size=3, - stride=stride, - padding=dilation, - groups=groups, - bias=False, - dilation=dilation) - - -def conv1x1(in_planes, out_planes, stride=1): - """1x1 convolution""" - return nn.Conv2d(in_planes, - out_planes, - kernel_size=1, - stride=stride, - bias=False) - - -class IBasicBlock(nn.Module): - expansion = 1 - def __init__(self, inplanes, planes, stride=1, downsample=None, - groups=1, base_width=64, dilation=1): - super(IBasicBlock, self).__init__() - if groups != 1 or base_width != 64: - raise ValueError('BasicBlock only supports groups=1 and base_width=64') - if dilation > 1: - raise NotImplementedError("Dilation > 1 not supported in BasicBlock") - self.bn1 = nn.BatchNorm2d(inplanes, eps=1e-05,) - self.conv1 = conv3x3(inplanes, planes) - self.bn2 = 
nn.BatchNorm2d(planes, eps=1e-05,) - self.prelu = nn.PReLU(planes) - self.conv2 = conv3x3(planes, planes, stride) - self.bn3 = nn.BatchNorm2d(planes, eps=1e-05,) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - identity = x - out = self.bn1(x) - out = self.conv1(out) - out = self.bn2(out) - out = self.prelu(out) - out = self.conv2(out) - out = self.bn3(out) - if self.downsample is not None: - identity = self.downsample(x) - out += identity - return out - - -class IResNet(nn.Module): - fc_scale = 7 * 7 - def __init__(self, - block, layers, dropout=0, num_features=512, zero_init_residual=False, - groups=1, width_per_group=64, replace_stride_with_dilation=None, fp16=False): - super(IResNet, self).__init__() - self.fp16 = fp16 - self.inplanes = 64 - self.dilation = 1 - if replace_stride_with_dilation is None: - replace_stride_with_dilation = [False, False, False] - if len(replace_stride_with_dilation) != 3: - raise ValueError("replace_stride_with_dilation should be None " - "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) - self.groups = groups - self.base_width = width_per_group - self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) - self.bn1 = nn.BatchNorm2d(self.inplanes, eps=1e-05) - self.prelu = nn.PReLU(self.inplanes) - self.layer1 = self._make_layer(block, 64, layers[0], stride=2) - self.layer2 = self._make_layer(block, - 128, - layers[1], - stride=2, - dilate=replace_stride_with_dilation[0]) - self.layer3 = self._make_layer(block, - 256, - layers[2], - stride=2, - dilate=replace_stride_with_dilation[1]) - self.layer4 = self._make_layer(block, - 512, - layers[3], - stride=2, - dilate=replace_stride_with_dilation[2]) - self.bn2 = nn.BatchNorm2d(512 * block.expansion, eps=1e-05,) - self.dropout = nn.Dropout(p=dropout, inplace=True) - self.fc = nn.Linear(512 * block.expansion * self.fc_scale, num_features) - self.features = nn.BatchNorm1d(num_features, eps=1e-05) - nn.init.constant_(self.features.weight, 1.0) - self.features.weight.requires_grad = False - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.normal_(m.weight, 0, 0.1) - elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - - if zero_init_residual: - for m in self.modules(): - if isinstance(m, IBasicBlock): - nn.init.constant_(m.bn2.weight, 0) - - def _make_layer(self, block, planes, blocks, stride=1, dilate=False): - downsample = None - previous_dilation = self.dilation - if dilate: - self.dilation *= stride - stride = 1 - if stride != 1 or self.inplanes != planes * block.expansion: - downsample = nn.Sequential( - conv1x1(self.inplanes, planes * block.expansion, stride), - nn.BatchNorm2d(planes * block.expansion, eps=1e-05, ), - ) - layers = [] - layers.append( - block(self.inplanes, planes, stride, downsample, self.groups, - self.base_width, previous_dilation)) - self.inplanes = planes * block.expansion - for _ in range(1, blocks): - layers.append( - block(self.inplanes, - planes, - groups=self.groups, - base_width=self.base_width, - dilation=self.dilation)) - - return nn.Sequential(*layers) - - def forward(self, x): - with torch.cuda.amp.autocast(self.fp16): - x = self.conv1(x) - x = self.bn1(x) - x = self.prelu(x) - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - x = self.layer4(x) - x = self.bn2(x) - x = torch.flatten(x, 1) - x = self.dropout(x) - x = self.fc(x.float() if self.fp16 else x) - x = self.features(x) - return x - - -def 
_iresnet(arch, block, layers, pretrained, progress, **kwargs): - model = IResNet(block, layers, **kwargs) - if pretrained: - raise ValueError() - return model - - -def iresnet18(pretrained=False, progress=True, **kwargs): - return _iresnet('iresnet18', IBasicBlock, [2, 2, 2, 2], pretrained, - progress, **kwargs) - - -def iresnet34(pretrained=False, progress=True, **kwargs): - return _iresnet('iresnet34', IBasicBlock, [3, 4, 6, 3], pretrained, - progress, **kwargs) - - -def iresnet50(pretrained=False, progress=True, **kwargs): - return _iresnet('iresnet50', IBasicBlock, [3, 4, 14, 3], pretrained, - progress, **kwargs) - - -def iresnet100(pretrained=False, progress=True, **kwargs): - return _iresnet('iresnet100', IBasicBlock, [3, 13, 30, 3], pretrained, - progress, **kwargs) - - -def iresnet200(pretrained=False, progress=True, **kwargs): - return _iresnet('iresnet200', IBasicBlock, [6, 26, 60, 6], pretrained, - progress, **kwargs) - diff --git a/spaces/kevinwang676/VoiceChanger/src/face3d/models/networks.py b/spaces/kevinwang676/VoiceChanger/src/face3d/models/networks.py deleted file mode 100644 index ead9cdcb8720b845c233de79dc8a8d1668492108..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/VoiceChanger/src/face3d/models/networks.py +++ /dev/null @@ -1,521 +0,0 @@ -"""This script defines deep neural networks for Deep3DFaceRecon_pytorch -""" - -import os -import numpy as np -import torch.nn.functional as F -from torch.nn import init -import functools -from torch.optim import lr_scheduler -import torch -from torch import Tensor -import torch.nn as nn -try: - from torch.hub import load_state_dict_from_url -except ImportError: - from torch.utils.model_zoo import load_url as load_state_dict_from_url -from typing import Type, Any, Callable, Union, List, Optional -from .arcface_torch.backbones import get_model -from kornia.geometry import warp_affine - -def resize_n_crop(image, M, dsize=112): - # image: (b, c, h, w) - # M : (b, 2, 3) - return warp_affine(image, M, dsize=(dsize, dsize), align_corners=True) - -def filter_state_dict(state_dict, remove_name='fc'): - new_state_dict = {} - for key in state_dict: - if remove_name in key: - continue - new_state_dict[key] = state_dict[key] - return new_state_dict - -def get_scheduler(optimizer, opt): - """Return a learning rate scheduler - - Parameters: - optimizer -- the optimizer of the network - opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.  - opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine - - For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers. - See https://pytorch.org/docs/stable/optim.html for more details. 
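    As a worked example of the 'linear' policy (illustrative values, not defaults):
    with opt.epoch_count = 1 and opt.n_epochs = 20, the multiplier
    lr_l = 1.0 - max(0, epoch + epoch_count - n_epochs) / (n_epochs + 1)
    stays at 1.0 through epoch 19 and then decays linearly, reaching 0 at epoch 40.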
- """ - if opt.lr_policy == 'linear': - def lambda_rule(epoch): - lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs + 1) - return lr_l - scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule) - elif opt.lr_policy == 'step': - scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_epochs, gamma=0.2) - elif opt.lr_policy == 'plateau': - scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5) - elif opt.lr_policy == 'cosine': - scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0) - else: - return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy) - return scheduler - - -def define_net_recon(net_recon, use_last_fc=False, init_path=None): - return ReconNetWrapper(net_recon, use_last_fc=use_last_fc, init_path=init_path) - -def define_net_recog(net_recog, pretrained_path=None): - net = RecogNetWrapper(net_recog=net_recog, pretrained_path=pretrained_path) - net.eval() - return net - -class ReconNetWrapper(nn.Module): - fc_dim=257 - def __init__(self, net_recon, use_last_fc=False, init_path=None): - super(ReconNetWrapper, self).__init__() - self.use_last_fc = use_last_fc - if net_recon not in func_dict: - return NotImplementedError('network [%s] is not implemented', net_recon) - func, last_dim = func_dict[net_recon] - backbone = func(use_last_fc=use_last_fc, num_classes=self.fc_dim) - if init_path and os.path.isfile(init_path): - state_dict = filter_state_dict(torch.load(init_path, map_location='cpu')) - backbone.load_state_dict(state_dict) - print("loading init net_recon %s from %s" %(net_recon, init_path)) - self.backbone = backbone - if not use_last_fc: - self.final_layers = nn.ModuleList([ - conv1x1(last_dim, 80, bias=True), # id layer - conv1x1(last_dim, 64, bias=True), # exp layer - conv1x1(last_dim, 80, bias=True), # tex layer - conv1x1(last_dim, 3, bias=True), # angle layer - conv1x1(last_dim, 27, bias=True), # gamma layer - conv1x1(last_dim, 2, bias=True), # tx, ty - conv1x1(last_dim, 1, bias=True) # tz - ]) - for m in self.final_layers: - nn.init.constant_(m.weight, 0.) - nn.init.constant_(m.bias, 0.) 
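    # The seven 1x1 heads above are concatenated in forward() into the fc_dim = 257
    # coefficient vector: 80 id + 64 exp + 80 tex + 3 angle + 27 gamma + 2 (tx, ty) + 1 tz.
    # A hedged sketch of splitting that flattened vector back into named groups
    # (split_coeffs is a hypothetical helper, not part of the original file):
    #   def split_coeffs(x):  # x: (batch, 257)
    #       return {'id': x[:, :80], 'exp': x[:, 80:144], 'tex': x[:, 144:224],
    #               'angle': x[:, 224:227], 'gamma': x[:, 227:254], 'trans': x[:, 254:257]}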
- - def forward(self, x): - x = self.backbone(x) - if not self.use_last_fc: - output = [] - for layer in self.final_layers: - output.append(layer(x)) - x = torch.flatten(torch.cat(output, dim=1), 1) - return x - - -class RecogNetWrapper(nn.Module): - def __init__(self, net_recog, pretrained_path=None, input_size=112): - super(RecogNetWrapper, self).__init__() - net = get_model(name=net_recog, fp16=False) - if pretrained_path: - state_dict = torch.load(pretrained_path, map_location='cpu') - net.load_state_dict(state_dict) - print("loading pretrained net_recog %s from %s" %(net_recog, pretrained_path)) - for param in net.parameters(): - param.requires_grad = False - self.net = net - self.preprocess = lambda x: 2 * x - 1 - self.input_size=input_size - - def forward(self, image, M): - image = self.preprocess(resize_n_crop(image, M, self.input_size)) - id_feature = F.normalize(self.net(image), dim=-1, p=2) - return id_feature - - -# adapted from https://github.com/pytorch/vision/edit/master/torchvision/models/resnet.py -__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', - 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d', - 'wide_resnet50_2', 'wide_resnet101_2'] - - -model_urls = { - 'resnet18': 'https://download.pytorch.org/models/resnet18-f37072fd.pth', - 'resnet34': 'https://download.pytorch.org/models/resnet34-b627a593.pth', - 'resnet50': 'https://download.pytorch.org/models/resnet50-0676ba61.pth', - 'resnet101': 'https://download.pytorch.org/models/resnet101-63fe2227.pth', - 'resnet152': 'https://download.pytorch.org/models/resnet152-394f9c45.pth', - 'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth', - 'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth', - 'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth', - 'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth', -} - - -def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d: - """3x3 convolution with padding""" - return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, - padding=dilation, groups=groups, bias=False, dilation=dilation) - - -def conv1x1(in_planes: int, out_planes: int, stride: int = 1, bias: bool = False) -> nn.Conv2d: - """1x1 convolution""" - return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=bias) - - -class BasicBlock(nn.Module): - expansion: int = 1 - - def __init__( - self, - inplanes: int, - planes: int, - stride: int = 1, - downsample: Optional[nn.Module] = None, - groups: int = 1, - base_width: int = 64, - dilation: int = 1, - norm_layer: Optional[Callable[..., nn.Module]] = None - ) -> None: - super(BasicBlock, self).__init__() - if norm_layer is None: - norm_layer = nn.BatchNorm2d - if groups != 1 or base_width != 64: - raise ValueError('BasicBlock only supports groups=1 and base_width=64') - if dilation > 1: - raise NotImplementedError("Dilation > 1 not supported in BasicBlock") - # Both self.conv1 and self.downsample layers downsample the input when stride != 1 - self.conv1 = conv3x3(inplanes, planes, stride) - self.bn1 = norm_layer(planes) - self.relu = nn.ReLU(inplace=True) - self.conv2 = conv3x3(planes, planes) - self.bn2 = norm_layer(planes) - self.downsample = downsample - self.stride = stride - - def forward(self, x: Tensor) -> Tensor: - identity = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = 
self.bn2(out) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - out = self.relu(out) - - return out - - -class Bottleneck(nn.Module): - # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2) - # while original implementation places the stride at the first 1x1 convolution(self.conv1) - # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385. - # This variant is also known as ResNet V1.5 and improves accuracy according to - # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch. - - expansion: int = 4 - - def __init__( - self, - inplanes: int, - planes: int, - stride: int = 1, - downsample: Optional[nn.Module] = None, - groups: int = 1, - base_width: int = 64, - dilation: int = 1, - norm_layer: Optional[Callable[..., nn.Module]] = None - ) -> None: - super(Bottleneck, self).__init__() - if norm_layer is None: - norm_layer = nn.BatchNorm2d - width = int(planes * (base_width / 64.)) * groups - # Both self.conv2 and self.downsample layers downsample the input when stride != 1 - self.conv1 = conv1x1(inplanes, width) - self.bn1 = norm_layer(width) - self.conv2 = conv3x3(width, width, stride, groups, dilation) - self.bn2 = norm_layer(width) - self.conv3 = conv1x1(width, planes * self.expansion) - self.bn3 = norm_layer(planes * self.expansion) - self.relu = nn.ReLU(inplace=True) - self.downsample = downsample - self.stride = stride - - def forward(self, x: Tensor) -> Tensor: - identity = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.relu(out) - - out = self.conv3(out) - out = self.bn3(out) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - out = self.relu(out) - - return out - - -class ResNet(nn.Module): - - def __init__( - self, - block: Type[Union[BasicBlock, Bottleneck]], - layers: List[int], - num_classes: int = 1000, - zero_init_residual: bool = False, - use_last_fc: bool = False, - groups: int = 1, - width_per_group: int = 64, - replace_stride_with_dilation: Optional[List[bool]] = None, - norm_layer: Optional[Callable[..., nn.Module]] = None - ) -> None: - super(ResNet, self).__init__() - if norm_layer is None: - norm_layer = nn.BatchNorm2d - self._norm_layer = norm_layer - - self.inplanes = 64 - self.dilation = 1 - if replace_stride_with_dilation is None: - # each element in the tuple indicates if we should replace - # the 2x2 stride with a dilated convolution instead - replace_stride_with_dilation = [False, False, False] - if len(replace_stride_with_dilation) != 3: - raise ValueError("replace_stride_with_dilation should be None " - "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) - self.use_last_fc = use_last_fc - self.groups = groups - self.base_width = width_per_group - self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, - bias=False) - self.bn1 = norm_layer(self.inplanes) - self.relu = nn.ReLU(inplace=True) - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) - self.layer1 = self._make_layer(block, 64, layers[0]) - self.layer2 = self._make_layer(block, 128, layers[1], stride=2, - dilate=replace_stride_with_dilation[0]) - self.layer3 = self._make_layer(block, 256, layers[2], stride=2, - dilate=replace_stride_with_dilation[1]) - self.layer4 = self._make_layer(block, 512, layers[3], stride=2, - dilate=replace_stride_with_dilation[2]) - self.avgpool = 
nn.AdaptiveAvgPool2d((1, 1)) - - if self.use_last_fc: - self.fc = nn.Linear(512 * block.expansion, num_classes) - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - - - - # Zero-initialize the last BN in each residual branch, - # so that the residual branch starts with zeros, and each residual block behaves like an identity. - # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 - if zero_init_residual: - for m in self.modules(): - if isinstance(m, Bottleneck): - nn.init.constant_(m.bn3.weight, 0) # type: ignore[arg-type] - elif isinstance(m, BasicBlock): - nn.init.constant_(m.bn2.weight, 0) # type: ignore[arg-type] - - def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int, - stride: int = 1, dilate: bool = False) -> nn.Sequential: - norm_layer = self._norm_layer - downsample = None - previous_dilation = self.dilation - if dilate: - self.dilation *= stride - stride = 1 - if stride != 1 or self.inplanes != planes * block.expansion: - downsample = nn.Sequential( - conv1x1(self.inplanes, planes * block.expansion, stride), - norm_layer(planes * block.expansion), - ) - - layers = [] - layers.append(block(self.inplanes, planes, stride, downsample, self.groups, - self.base_width, previous_dilation, norm_layer)) - self.inplanes = planes * block.expansion - for _ in range(1, blocks): - layers.append(block(self.inplanes, planes, groups=self.groups, - base_width=self.base_width, dilation=self.dilation, - norm_layer=norm_layer)) - - return nn.Sequential(*layers) - - def _forward_impl(self, x: Tensor) -> Tensor: - # See note [TorchScript super()] - x = self.conv1(x) - x = self.bn1(x) - x = self.relu(x) - x = self.maxpool(x) - - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - x = self.layer4(x) - - x = self.avgpool(x) - if self.use_last_fc: - x = torch.flatten(x, 1) - x = self.fc(x) - return x - - def forward(self, x: Tensor) -> Tensor: - return self._forward_impl(x) - - -def _resnet( - arch: str, - block: Type[Union[BasicBlock, Bottleneck]], - layers: List[int], - pretrained: bool, - progress: bool, - **kwargs: Any -) -> ResNet: - model = ResNet(block, layers, **kwargs) - if pretrained: - state_dict = load_state_dict_from_url(model_urls[arch], - progress=progress) - model.load_state_dict(state_dict) - return model - - -def resnet18(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: - r"""ResNet-18 model from - `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_. - - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - """ - return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, - **kwargs) - - -def resnet34(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: - r"""ResNet-34 model from - `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_. 
- - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - """ - return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, - **kwargs) - - -def resnet50(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: - r"""ResNet-50 model from - `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_. - - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - """ - return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, - **kwargs) - - -def resnet101(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: - r"""ResNet-101 model from - `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_. - - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - """ - return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress, - **kwargs) - - -def resnet152(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: - r"""ResNet-152 model from - `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_. - - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - """ - return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress, - **kwargs) - - -def resnext50_32x4d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: - r"""ResNeXt-50 32x4d model from - `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_. - - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - """ - kwargs['groups'] = 32 - kwargs['width_per_group'] = 4 - return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3], - pretrained, progress, **kwargs) - - -def resnext101_32x8d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: - r"""ResNeXt-101 32x8d model from - `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_. - - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - """ - kwargs['groups'] = 32 - kwargs['width_per_group'] = 8 - return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], - pretrained, progress, **kwargs) - - -def wide_resnet50_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: - r"""Wide ResNet-50-2 model from - `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_. - - The model is the same as ResNet except for the bottleneck number of channels - which is twice larger in every block. The number of channels in outer 1x1 - convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 - channels, and in Wide ResNet-50-2 has 2048-1024-2048. 
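    As a quick check against the Bottleneck code above: width = int(planes * (base_width / 64.)) * groups,
    so with width_per_group = 64 * 2 a 512-plane block uses 1024 bottleneck channels
    (hence the 2048-1024-2048 pattern), versus 512 in the standard ResNet-50.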
- - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - """ - kwargs['width_per_group'] = 64 * 2 - return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3], - pretrained, progress, **kwargs) - - -def wide_resnet101_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: - r"""Wide ResNet-101-2 model from - `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_. - - The model is the same as ResNet except for the bottleneck number of channels - which is twice larger in every block. The number of channels in outer 1x1 - convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 - channels, and in Wide ResNet-50-2 has 2048-1024-2048. - - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - """ - kwargs['width_per_group'] = 64 * 2 - return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3], - pretrained, progress, **kwargs) - - -func_dict = { - 'resnet18': (resnet18, 512), - 'resnet50': (resnet50, 2048) -} diff --git a/spaces/kevinwang676/VoiceChangers/src/utils/model2safetensor.py b/spaces/kevinwang676/VoiceChangers/src/utils/model2safetensor.py deleted file mode 100644 index 50c485000d43ba9c230a0bc64ce8aeaaec6e2b29..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/VoiceChangers/src/utils/model2safetensor.py +++ /dev/null @@ -1,141 +0,0 @@ -import torch -import yaml -import os - -import safetensors -from safetensors.torch import save_file -from yacs.config import CfgNode as CN -import sys - -sys.path.append('/apdcephfs/private_shadowcun/SadTalker') - -from src.face3d.models import networks - -from src.facerender.modules.keypoint_detector import HEEstimator, KPDetector -from src.facerender.modules.mapping import MappingNet -from src.facerender.modules.generator import OcclusionAwareGenerator, OcclusionAwareSPADEGenerator - -from src.audio2pose_models.audio2pose import Audio2Pose -from src.audio2exp_models.networks import SimpleWrapperV2 -from src.test_audio2coeff import load_cpk - -size = 256 -############ face vid2vid -config_path = os.path.join('src', 'config', 'facerender.yaml') -current_root_path = '.' 
- -path_of_net_recon_model = os.path.join(current_root_path, 'checkpoints', 'epoch_20.pth') -net_recon = networks.define_net_recon(net_recon='resnet50', use_last_fc=False, init_path='') -checkpoint = torch.load(path_of_net_recon_model, map_location='cpu') -net_recon.load_state_dict(checkpoint['net_recon']) - -with open(config_path) as f: - config = yaml.safe_load(f) - -generator = OcclusionAwareSPADEGenerator(**config['model_params']['generator_params'], - **config['model_params']['common_params']) -kp_extractor = KPDetector(**config['model_params']['kp_detector_params'], - **config['model_params']['common_params']) -he_estimator = HEEstimator(**config['model_params']['he_estimator_params'], - **config['model_params']['common_params']) -mapping = MappingNet(**config['model_params']['mapping_params']) - -def load_cpk_facevid2vid(checkpoint_path, generator=None, discriminator=None, - kp_detector=None, he_estimator=None, optimizer_generator=None, - optimizer_discriminator=None, optimizer_kp_detector=None, - optimizer_he_estimator=None, device="cpu"): - - checkpoint = torch.load(checkpoint_path, map_location=torch.device(device)) - if generator is not None: - generator.load_state_dict(checkpoint['generator']) - if kp_detector is not None: - kp_detector.load_state_dict(checkpoint['kp_detector']) - if he_estimator is not None: - he_estimator.load_state_dict(checkpoint['he_estimator']) - if discriminator is not None: - try: - discriminator.load_state_dict(checkpoint['discriminator']) - except: - print ('No discriminator in the state-dict. Dicriminator will be randomly initialized') - if optimizer_generator is not None: - optimizer_generator.load_state_dict(checkpoint['optimizer_generator']) - if optimizer_discriminator is not None: - try: - optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator']) - except RuntimeError as e: - print ('No discriminator optimizer in the state-dict. 
Optimizer will be not initialized') - if optimizer_kp_detector is not None: - optimizer_kp_detector.load_state_dict(checkpoint['optimizer_kp_detector']) - if optimizer_he_estimator is not None: - optimizer_he_estimator.load_state_dict(checkpoint['optimizer_he_estimator']) - - return checkpoint['epoch'] - - -def load_cpk_facevid2vid_safetensor(checkpoint_path, generator=None, - kp_detector=None, he_estimator=None, - device="cpu"): - - checkpoint = safetensors.torch.load_file(checkpoint_path) - - if generator is not None: - x_generator = {} - for k,v in checkpoint.items(): - if 'generator' in k: - x_generator[k.replace('generator.', '')] = v - generator.load_state_dict(x_generator) - if kp_detector is not None: - x_generator = {} - for k,v in checkpoint.items(): - if 'kp_extractor' in k: - x_generator[k.replace('kp_extractor.', '')] = v - kp_detector.load_state_dict(x_generator) - if he_estimator is not None: - x_generator = {} - for k,v in checkpoint.items(): - if 'he_estimator' in k: - x_generator[k.replace('he_estimator.', '')] = v - he_estimator.load_state_dict(x_generator) - - return None - -free_view_checkpoint = '/apdcephfs/private_shadowcun/SadTalker/checkpoints/facevid2vid_'+str(size)+'-model.pth.tar' -load_cpk_facevid2vid(free_view_checkpoint, kp_detector=kp_extractor, generator=generator, he_estimator=he_estimator) - -wav2lip_checkpoint = os.path.join(current_root_path, 'checkpoints', 'wav2lip.pth') - -audio2pose_checkpoint = os.path.join(current_root_path, 'checkpoints', 'auido2pose_00140-model.pth') -audio2pose_yaml_path = os.path.join(current_root_path, 'src', 'config', 'auido2pose.yaml') - -audio2exp_checkpoint = os.path.join(current_root_path, 'checkpoints', 'auido2exp_00300-model.pth') -audio2exp_yaml_path = os.path.join(current_root_path, 'src', 'config', 'auido2exp.yaml') - -fcfg_pose = open(audio2pose_yaml_path) -cfg_pose = CN.load_cfg(fcfg_pose) -cfg_pose.freeze() -audio2pose_model = Audio2Pose(cfg_pose, wav2lip_checkpoint) -audio2pose_model.eval() -load_cpk(audio2pose_checkpoint, model=audio2pose_model, device='cpu') - -# load audio2exp_model -netG = SimpleWrapperV2() -netG.eval() -load_cpk(audio2exp_checkpoint, model=netG, device='cpu') - -class SadTalker(torch.nn.Module): - def __init__(self, kp_extractor, generator, netG, audio2pose, face_3drecon): - super(SadTalker, self).__init__() - self.kp_extractor = kp_extractor - self.generator = generator - self.audio2exp = netG - self.audio2pose = audio2pose - self.face_3drecon = face_3drecon - - -model = SadTalker(kp_extractor, generator, netG, audio2pose_model, net_recon) - -# here, we want to convert it to safetensor -save_file(model.state_dict(), "checkpoints/SadTalker_V0.0.2_"+str(size)+".safetensors") - -### test -load_cpk_facevid2vid_safetensor('checkpoints/SadTalker_V0.0.2_'+str(size)+'.safetensors', kp_detector=kp_extractor, generator=generator, he_estimator=None) \ No newline at end of file diff --git a/spaces/koajoel/PolyFormer/fairseq/examples/adaptive_span/adaptive_span_loss.py b/spaces/koajoel/PolyFormer/fairseq/examples/adaptive_span/adaptive_span_loss.py deleted file mode 100644 index 056245807e5f8d313a8ad5be68aea4e285f4f580..0000000000000000000000000000000000000000 --- a/spaces/koajoel/PolyFormer/fairseq/examples/adaptive_span/adaptive_span_loss.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import math -from dataclasses import dataclass - -import torch.nn.functional as F -from fairseq import metrics, utils -from fairseq.criterions import register_criterion -from fairseq.criterions.cross_entropy import CrossEntropyCriterion -from fairseq.dataclass import FairseqDataclass -from omegaconf import II - - -@dataclass -class AdaptiveSpanCriterionConfig(FairseqDataclass): - sentence_avg: bool = II("optimization.sentence_avg") - - -@register_criterion("adaptive_span_loss", dataclass=AdaptiveSpanCriterionConfig) -class AdaptiveSpanCriterion(CrossEntropyCriterion): - def __init__(self, task, sentence_avg): - super().__init__(task, sentence_avg) - - def forward(self, model, sample, reduce=True): - """Compute the loss for the given sample. - - Returns a tuple with three elements: - 1) the loss here is summed, different from the adaptive span code - 2) the sample size, which is used as the denominator for the gradient - 3) logging outputs to display while training - """ - net_output = model(**sample["net_input"]) - loss, aux_loss, avg_span, max_span = self.compute_loss( - model, net_output, sample, reduce=reduce - ) - sample_size = ( - sample["target"].size(0) if self.sentence_avg else sample["ntokens"] - ) - loss /= sample_size - total_loss = loss + aux_loss - sample_size = 1 - - logging_output = { - "loss": loss.data, - "ntokens": sample["ntokens"], - "nsentences": sample["target"].size(0), - "sample_size": sample_size, - "total_loss": total_loss.data, - "avg_span": avg_span * sample_size, - "max_span": max_span * sample_size, - } - return total_loss, sample_size, logging_output - - def compute_loss(self, model, net_output, sample, reduce=True): - loss, _ = super().compute_loss(model, net_output, sample, reduce) - aux_loss = model.get_aux_loss() - avg_span = model.get_current_avg_span() - max_span = model.get_current_max_span() - return loss, aux_loss, avg_span, max_span - - @staticmethod - def reduce_metrics(logging_outputs) -> None: - """Aggregate logging outputs from data parallel training.""" - loss_sum = sum(log.get("loss", 0) for log in logging_outputs) - ntokens = sum(log.get("ntokens", 0) for log in logging_outputs) - sample_size = sum(log.get("sample_size", 0) for log in logging_outputs) - total_loss_sum = sum(log.get("total_loss", 0) for log in logging_outputs) - avg_span_sum = sum(log.get("avg_span", 0) for log in logging_outputs) - max_span_sum = sum(log.get("max_span", 0) for log in logging_outputs) - - # we divide by log(2) to convert the loss from base e to base 2 - metrics.log_scalar( - "loss", loss_sum / sample_size / math.log(2), sample_size, round=3 - ) - metrics.log_scalar("avg_span", avg_span_sum / sample_size, sample_size, round=3) - metrics.log_scalar("max_span", max_span_sum / sample_size, sample_size, round=3) - # total loss contains the L1 norm on adaptive-span - metrics.log_scalar( - "total_loss", - total_loss_sum / sample_size / math.log(2), - sample_size, - round=3, - ) - if sample_size != ntokens: - metrics.log_scalar( - "nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3 - ) - metrics.log_derived( - "ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg) - ) - else: - metrics.log_derived( - "ppl", lambda meters: utils.get_perplexity(meters["loss"].avg) - ) - - @staticmethod - def logging_outputs_can_be_summed() -> bool: - """ - Whether the logging outputs returned by `forward` can be summed - across workers prior to calling `reduce_metrics`. Setting this - to True will improves distributed training speed. 
- """ - return True diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/attrs/exceptions.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/attrs/exceptions.py deleted file mode 100644 index bd9efed202ab1cdf57a9e99cb4e094ef6f38d0c0..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/attrs/exceptions.py +++ /dev/null @@ -1,3 +0,0 @@ -# SPDX-License-Identifier: MIT - -from attr.exceptions import * # noqa diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/dateutil/tz/win.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/dateutil/tz/win.py deleted file mode 100644 index cde07ba792c40903f0c334839140173b39fd8124..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/dateutil/tz/win.py +++ /dev/null @@ -1,370 +0,0 @@ -# -*- coding: utf-8 -*- -""" -This module provides an interface to the native time zone data on Windows, -including :py:class:`datetime.tzinfo` implementations. - -Attempting to import this module on a non-Windows platform will raise an -:py:obj:`ImportError`. -""" -# This code was originally contributed by Jeffrey Harris. -import datetime -import struct - -from six.moves import winreg -from six import text_type - -try: - import ctypes - from ctypes import wintypes -except ValueError: - # ValueError is raised on non-Windows systems for some horrible reason. - raise ImportError("Running tzwin on non-Windows system") - -from ._common import tzrangebase - -__all__ = ["tzwin", "tzwinlocal", "tzres"] - -ONEWEEK = datetime.timedelta(7) - -TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones" -TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones" -TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation" - - -def _settzkeyname(): - handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) - try: - winreg.OpenKey(handle, TZKEYNAMENT).Close() - TZKEYNAME = TZKEYNAMENT - except WindowsError: - TZKEYNAME = TZKEYNAME9X - handle.Close() - return TZKEYNAME - - -TZKEYNAME = _settzkeyname() - - -class tzres(object): - """ - Class for accessing ``tzres.dll``, which contains timezone name related - resources. - - .. versionadded:: 2.5.0 - """ - p_wchar = ctypes.POINTER(wintypes.WCHAR) # Pointer to a wide char - - def __init__(self, tzres_loc='tzres.dll'): - # Load the user32 DLL so we can load strings from tzres - user32 = ctypes.WinDLL('user32') - - # Specify the LoadStringW function - user32.LoadStringW.argtypes = (wintypes.HINSTANCE, - wintypes.UINT, - wintypes.LPWSTR, - ctypes.c_int) - - self.LoadStringW = user32.LoadStringW - self._tzres = ctypes.WinDLL(tzres_loc) - self.tzres_loc = tzres_loc - - def load_name(self, offset): - """ - Load a timezone name from a DLL offset (integer). - - >>> from dateutil.tzwin import tzres - >>> tzr = tzres() - >>> print(tzr.load_name(112)) - 'Eastern Standard Time' - - :param offset: - A positive integer value referring to a string from the tzres dll. - - .. note:: - - Offsets found in the registry are generally of the form - ``@tzres.dll,-114``. The offset in this case is 114, not -114. 
- - """ - resource = self.p_wchar() - lpBuffer = ctypes.cast(ctypes.byref(resource), wintypes.LPWSTR) - nchar = self.LoadStringW(self._tzres._handle, offset, lpBuffer, 0) - return resource[:nchar] - - def name_from_string(self, tzname_str): - """ - Parse strings as returned from the Windows registry into the time zone - name as defined in the registry. - - >>> from dateutil.tzwin import tzres - >>> tzr = tzres() - >>> print(tzr.name_from_string('@tzres.dll,-251')) - 'Dateline Daylight Time' - >>> print(tzr.name_from_string('Eastern Standard Time')) - 'Eastern Standard Time' - - :param tzname_str: - A timezone name string as returned from a Windows registry key. - - :return: - Returns the localized timezone string from tzres.dll if the string - is of the form `@tzres.dll,-offset`, else returns the input string. - """ - if not tzname_str.startswith('@'): - return tzname_str - - name_splt = tzname_str.split(',-') - try: - offset = int(name_splt[1]) - except: - raise ValueError("Malformed timezone string.") - - return self.load_name(offset) - - -class tzwinbase(tzrangebase): - """tzinfo class based on win32's timezones available in the registry.""" - def __init__(self): - raise NotImplementedError('tzwinbase is an abstract base class') - - def __eq__(self, other): - # Compare on all relevant dimensions, including name. - if not isinstance(other, tzwinbase): - return NotImplemented - - return (self._std_offset == other._std_offset and - self._dst_offset == other._dst_offset and - self._stddayofweek == other._stddayofweek and - self._dstdayofweek == other._dstdayofweek and - self._stdweeknumber == other._stdweeknumber and - self._dstweeknumber == other._dstweeknumber and - self._stdhour == other._stdhour and - self._dsthour == other._dsthour and - self._stdminute == other._stdminute and - self._dstminute == other._dstminute and - self._std_abbr == other._std_abbr and - self._dst_abbr == other._dst_abbr) - - @staticmethod - def list(): - """Return a list of all time zones known to the system.""" - with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: - with winreg.OpenKey(handle, TZKEYNAME) as tzkey: - result = [winreg.EnumKey(tzkey, i) - for i in range(winreg.QueryInfoKey(tzkey)[0])] - return result - - def display(self): - """ - Return the display name of the time zone. - """ - return self._display - - def transitions(self, year): - """ - For a given year, get the DST on and off transition times, expressed - always on the standard time side. For zones with no transitions, this - function returns ``None``. - - :param year: - The year whose transitions you would like to query. - - :return: - Returns a :class:`tuple` of :class:`datetime.datetime` objects, - ``(dston, dstoff)`` for zones with an annual DST transition, or - ``None`` for fixed offset zones. 
- """ - - if not self.hasdst: - return None - - dston = picknthweekday(year, self._dstmonth, self._dstdayofweek, - self._dsthour, self._dstminute, - self._dstweeknumber) - - dstoff = picknthweekday(year, self._stdmonth, self._stddayofweek, - self._stdhour, self._stdminute, - self._stdweeknumber) - - # Ambiguous dates default to the STD side - dstoff -= self._dst_base_offset - - return dston, dstoff - - def _get_hasdst(self): - return self._dstmonth != 0 - - @property - def _dst_base_offset(self): - return self._dst_base_offset_ - - -class tzwin(tzwinbase): - """ - Time zone object created from the zone info in the Windows registry - - These are similar to :py:class:`dateutil.tz.tzrange` objects in that - the time zone data is provided in the format of a single offset rule - for either 0 or 2 time zone transitions per year. - - :param: name - The name of a Windows time zone key, e.g. "Eastern Standard Time". - The full list of keys can be retrieved with :func:`tzwin.list`. - """ - - def __init__(self, name): - self._name = name - - with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: - tzkeyname = text_type("{kn}\\{name}").format(kn=TZKEYNAME, name=name) - with winreg.OpenKey(handle, tzkeyname) as tzkey: - keydict = valuestodict(tzkey) - - self._std_abbr = keydict["Std"] - self._dst_abbr = keydict["Dlt"] - - self._display = keydict["Display"] - - # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm - tup = struct.unpack("=3l16h", keydict["TZI"]) - stdoffset = -tup[0]-tup[1] # Bias + StandardBias * -1 - dstoffset = stdoffset-tup[2] # + DaylightBias * -1 - self._std_offset = datetime.timedelta(minutes=stdoffset) - self._dst_offset = datetime.timedelta(minutes=dstoffset) - - # for the meaning see the win32 TIME_ZONE_INFORMATION structure docs - # http://msdn.microsoft.com/en-us/library/windows/desktop/ms725481(v=vs.85).aspx - (self._stdmonth, - self._stddayofweek, # Sunday = 0 - self._stdweeknumber, # Last = 5 - self._stdhour, - self._stdminute) = tup[4:9] - - (self._dstmonth, - self._dstdayofweek, # Sunday = 0 - self._dstweeknumber, # Last = 5 - self._dsthour, - self._dstminute) = tup[12:17] - - self._dst_base_offset_ = self._dst_offset - self._std_offset - self.hasdst = self._get_hasdst() - - def __repr__(self): - return "tzwin(%s)" % repr(self._name) - - def __reduce__(self): - return (self.__class__, (self._name,)) - - -class tzwinlocal(tzwinbase): - """ - Class representing the local time zone information in the Windows registry - - While :class:`dateutil.tz.tzlocal` makes system calls (via the :mod:`time` - module) to retrieve time zone information, ``tzwinlocal`` retrieves the - rules directly from the Windows registry and creates an object like - :class:`dateutil.tz.tzwin`. - - Because Windows does not have an equivalent of :func:`time.tzset`, on - Windows, :class:`dateutil.tz.tzlocal` instances will always reflect the - time zone settings *at the time that the process was started*, meaning - changes to the machine's time zone settings during the run of a program - on Windows will **not** be reflected by :class:`dateutil.tz.tzlocal`. - Because ``tzwinlocal`` reads the registry directly, it is unaffected by - this issue. 
- """ - def __init__(self): - with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: - with winreg.OpenKey(handle, TZLOCALKEYNAME) as tzlocalkey: - keydict = valuestodict(tzlocalkey) - - self._std_abbr = keydict["StandardName"] - self._dst_abbr = keydict["DaylightName"] - - try: - tzkeyname = text_type('{kn}\\{sn}').format(kn=TZKEYNAME, - sn=self._std_abbr) - with winreg.OpenKey(handle, tzkeyname) as tzkey: - _keydict = valuestodict(tzkey) - self._display = _keydict["Display"] - except OSError: - self._display = None - - stdoffset = -keydict["Bias"]-keydict["StandardBias"] - dstoffset = stdoffset-keydict["DaylightBias"] - - self._std_offset = datetime.timedelta(minutes=stdoffset) - self._dst_offset = datetime.timedelta(minutes=dstoffset) - - # For reasons unclear, in this particular key, the day of week has been - # moved to the END of the SYSTEMTIME structure. - tup = struct.unpack("=8h", keydict["StandardStart"]) - - (self._stdmonth, - self._stdweeknumber, # Last = 5 - self._stdhour, - self._stdminute) = tup[1:5] - - self._stddayofweek = tup[7] - - tup = struct.unpack("=8h", keydict["DaylightStart"]) - - (self._dstmonth, - self._dstweeknumber, # Last = 5 - self._dsthour, - self._dstminute) = tup[1:5] - - self._dstdayofweek = tup[7] - - self._dst_base_offset_ = self._dst_offset - self._std_offset - self.hasdst = self._get_hasdst() - - def __repr__(self): - return "tzwinlocal()" - - def __str__(self): - # str will return the standard name, not the daylight name. - return "tzwinlocal(%s)" % repr(self._std_abbr) - - def __reduce__(self): - return (self.__class__, ()) - - -def picknthweekday(year, month, dayofweek, hour, minute, whichweek): - """ dayofweek == 0 means Sunday, whichweek 5 means last instance """ - first = datetime.datetime(year, month, 1, hour, minute) - - # This will work if dayofweek is ISO weekday (1-7) or Microsoft-style (0-6), - # Because 7 % 7 = 0 - weekdayone = first.replace(day=((dayofweek - first.isoweekday()) % 7) + 1) - wd = weekdayone + ((whichweek - 1) * ONEWEEK) - if (wd.month != month): - wd -= ONEWEEK - - return wd - - -def valuestodict(key): - """Convert a registry key's values to a dictionary.""" - dout = {} - size = winreg.QueryInfoKey(key)[1] - tz_res = None - - for i in range(size): - key_name, value, dtype = winreg.EnumValue(key, i) - if dtype == winreg.REG_DWORD or dtype == winreg.REG_DWORD_LITTLE_ENDIAN: - # If it's a DWORD (32-bit integer), it's stored as unsigned - convert - # that to a proper signed integer - if value & (1 << 31): - value = value - (1 << 32) - elif dtype == winreg.REG_SZ: - # If it's a reference to the tzres DLL, load the actual string - if value.startswith('@tzres'): - tz_res = tz_res or tzres() - value = tz_res.name_from_string(value) - - value = value.rstrip('\x00') # Remove trailing nulls - - dout[key_name] = value - - return dout diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/huggingface_hub/utils/_headers.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/huggingface_hub/utils/_headers.py deleted file mode 100644 index 846cca3f1d3c3f000de92840a89fb11e35f2083f..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/huggingface_hub/utils/_headers.py +++ /dev/null @@ -1,234 +0,0 @@ -# coding=utf-8 -# Copyright 2022-present, the HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Contains utilities to handle headers to send in calls to Huggingface Hub.""" -from typing import Dict, Optional, Union - -from .. import constants -from ._hf_folder import HfFolder -from ._runtime import ( - get_fastai_version, - get_fastcore_version, - get_hf_hub_version, - get_python_version, - get_tf_version, - get_torch_version, - is_fastai_available, - is_fastcore_available, - is_tf_available, - is_torch_available, -) -from ._validators import validate_hf_hub_args - - -class LocalTokenNotFoundError(EnvironmentError): - """Raised if local token is required but not found.""" - - -@validate_hf_hub_args -def build_hf_headers( - *, - token: Optional[Union[bool, str]] = None, - is_write_action: bool = False, - library_name: Optional[str] = None, - library_version: Optional[str] = None, - user_agent: Union[Dict, str, None] = None, -) -> Dict[str, str]: - """ - Build headers dictionary to send in a HF Hub call. - - By default, authorization token is always provided either from argument (explicit - use) or retrieved from the cache (implicit use). To explicitly avoid sending the - token to the Hub, set `token=False` or set the `HF_HUB_DISABLE_IMPLICIT_TOKEN` - environment variable. - - In case of an API call that requires write access, an error is thrown if token is - `None` or token is an organization token (starting with `"api_org***"`). - - In addition to the auth header, a user-agent is added to provide information about - the installed packages (versions of python, huggingface_hub, torch, tensorflow, - fastai and fastcore). - - Args: - token (`str`, `bool`, *optional*): - The token to be sent in authorization header for the Hub call: - - if a string, it is used as the Hugging Face token - - if `True`, the token is read from the machine (cache or env variable) - - if `False`, authorization header is not set - - if `None`, the token is read from the machine only except if - `HF_HUB_DISABLE_IMPLICIT_TOKEN` env variable is set. - is_write_action (`bool`, default to `False`): - Set to True if the API call requires a write access. If `True`, the token - will be validated (cannot be `None`, cannot start by `"api_org***"`). - library_name (`str`, *optional*): - The name of the library that is making the HTTP request. Will be added to - the user-agent header. - library_version (`str`, *optional*): - The version of the library that is making the HTTP request. Will be added - to the user-agent header. - user_agent (`str`, `dict`, *optional*): - The user agent info in the form of a dictionary or a single string. It will - be completed with information about the installed packages. - - Returns: - A `Dict` of headers to pass in your API call. 
- - Example: - ```py - >>> build_hf_headers(token="hf_***") # explicit token - {"authorization": "Bearer hf_***", "user-agent": ""} - - >>> build_hf_headers(token=True) # explicitly use cached token - {"authorization": "Bearer hf_***",...} - - >>> build_hf_headers(token=False) # explicitly don't use cached token - {"user-agent": ...} - - >>> build_hf_headers() # implicit use of the cached token - {"authorization": "Bearer hf_***",...} - - # HF_HUB_DISABLE_IMPLICIT_TOKEN=True # to set as env variable - >>> build_hf_headers() # token is not sent - {"user-agent": ...} - - >>> build_hf_headers(token="api_org_***", is_write_action=True) - ValueError: You must use your personal account token for write-access methods. - - >>> build_hf_headers(library_name="transformers", library_version="1.2.3") - {"authorization": ..., "user-agent": "transformers/1.2.3; hf_hub/0.10.2; python/3.10.4; tensorflow/1.55"} - ``` - - Raises: - [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError) - If organization token is passed and "write" access is required. - [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError) - If "write" access is required but token is not passed and not saved locally. - [`EnvironmentError`](https://docs.python.org/3/library/exceptions.html#EnvironmentError) - If `token=True` but token is not saved locally. - """ - # Get auth token to send - token_to_send = get_token_to_send(token) - _validate_token_to_send(token_to_send, is_write_action=is_write_action) - - # Combine headers - headers = { - "user-agent": _http_user_agent( - library_name=library_name, - library_version=library_version, - user_agent=user_agent, - ) - } - if token_to_send is not None: - headers["authorization"] = f"Bearer {token_to_send}" - return headers - - -def get_token_to_send(token: Optional[Union[bool, str]]) -> Optional[str]: - """Select the token to send from either `token` or the cache.""" - # Case token is explicitly provided - if isinstance(token, str): - return token - - # Case token is explicitly forbidden - if token is False: - return None - - # Token is not provided: we get it from local cache - cached_token = HfFolder().get_token() - - # Case token is explicitly required - if token is True: - if cached_token is None: - raise LocalTokenNotFoundError( - "Token is required (`token=True`), but no token found. You" - " need to provide a token or be logged in to Hugging Face with" - " `huggingface-cli login` or `huggingface_hub.login`. See" - " https://huggingface.co/settings/tokens." - ) - return cached_token - - # Case implicit use of the token is forbidden by env variable - if constants.HF_HUB_DISABLE_IMPLICIT_TOKEN: - return None - - # Otherwise: we use the cached token as the user has not explicitly forbidden it - return cached_token - - -def _validate_token_to_send(token: Optional[str], is_write_action: bool) -> None: - if is_write_action: - if token is None: - raise ValueError( - "Token is required (write-access action) but no token found. You need" - " to provide a token or be logged in to Hugging Face with" - " `huggingface-cli login` or `huggingface_hub.login`. See" - " https://huggingface.co/settings/tokens." - ) - if token.startswith("api_org"): - raise ValueError( - "You must use your personal account token for write-access methods. 
To" - " generate a write-access token, go to" - " https://huggingface.co/settings/tokens" - ) - - -def _http_user_agent( - *, - library_name: Optional[str] = None, - library_version: Optional[str] = None, - user_agent: Union[Dict, str, None] = None, -) -> str: - """Format a user-agent string containing information about the installed packages. - - Args: - library_name (`str`, *optional*): - The name of the library that is making the HTTP request. - library_version (`str`, *optional*): - The version of the library that is making the HTTP request. - user_agent (`str`, `dict`, *optional*): - The user agent info in the form of a dictionary or a single string. - - Returns: - The formatted user-agent string. - """ - if library_name is not None: - ua = f"{library_name}/{library_version}" - else: - ua = "unknown/None" - ua += f"; hf_hub/{get_hf_hub_version()}" - ua += f"; python/{get_python_version()}" - - if not constants.HF_HUB_DISABLE_TELEMETRY: - if is_torch_available(): - ua += f"; torch/{get_torch_version()}" - if is_tf_available(): - ua += f"; tensorflow/{get_tf_version()}" - if is_fastai_available(): - ua += f"; fastai/{get_fastai_version()}" - if is_fastcore_available(): - ua += f"; fastcore/{get_fastcore_version()}" - - if isinstance(user_agent, dict): - ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items()) - elif isinstance(user_agent, str): - ua += "; " + user_agent - - return _deduplicate_user_agent(ua) - - -def _deduplicate_user_agent(user_agent: str) -> str: - """Deduplicate redundant information in the generated user-agent.""" - # Split around ";" > Strip whitespaces > Store as dict keys (ensure unicity) > format back as string - # Order is implicitly preserved by dictionary structure (see https://stackoverflow.com/a/53657523). - return "; ".join({key.strip(): None for key in user_agent.split(";")}.keys()) diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/markdown_it/rules_block/paragraph.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/markdown_it/rules_block/paragraph.py deleted file mode 100644 index fef7edf7803ab98c4c9f40c7118483838cbd0994..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/markdown_it/rules_block/paragraph.py +++ /dev/null @@ -1,66 +0,0 @@ -"""Paragraph.""" -import logging - -from ..ruler import Ruler -from .state_block import StateBlock - -LOGGER = logging.getLogger(__name__) - - -def paragraph(state: StateBlock, startLine: int, endLine: int, silent: bool = False): - LOGGER.debug( - "entering paragraph: %s, %s, %s, %s", state, startLine, endLine, silent - ) - - nextLine = startLine + 1 - ruler: Ruler = state.md.block.ruler - terminatorRules = ruler.getRules("paragraph") - endLine = state.lineMax - - oldParentType = state.parentType - state.parentType = "paragraph" - - # jump line-by-line until empty one or EOF - while nextLine < endLine: - if state.isEmpty(nextLine): - break - # this would be a code block normally, but after paragraph - # it's considered a lazy continuation regardless of what's there - if state.sCount[nextLine] - state.blkIndent > 3: - nextLine += 1 - continue - - # quirk for blockquotes, this line should already be checked by that rule - if state.sCount[nextLine] < 0: - nextLine += 1 - continue - - # Some tags can terminate paragraph without empty line. 
- terminate = False - for terminatorRule in terminatorRules: - if terminatorRule(state, nextLine, endLine, True): - terminate = True - break - - if terminate: - break - - nextLine += 1 - - content = state.getLines(startLine, nextLine, state.blkIndent, False).strip() - - state.line = nextLine - - token = state.push("paragraph_open", "p", 1) - token.map = [startLine, state.line] - - token = state.push("inline", "", 0) - token.content = content - token.map = [startLine, state.line] - token.children = [] - - token = state.push("paragraph_close", "p", -1) - - state.parentType = oldParentType - - return True diff --git a/spaces/langvision/ChatWeb/EXAMPLE.md b/spaces/langvision/ChatWeb/EXAMPLE.md deleted file mode 100644 index c9ace37c7cbe2429832c37f9347389cd58d961f2..0000000000000000000000000000000000000000 --- a/spaces/langvision/ChatWeb/EXAMPLE.md +++ /dev/null @@ -1,24 +0,0 @@ ---- ---- -**user**: Write a report on the topic of LLM model - -**ChatGPT**: Title: The LLM Model: Enhancing Natural Language Processing Tasks -1. Introduction -Natural Language Processing (NLP) has gained significant prominence in recent years. Researchers and practitioners have been incessantly working towards improving various NLP tasks, such as machine translation, text classification, sentiment analysis, question-answering systems, and many others. The emergence of large pre-trained language models, such as the LLM Model, has revolutionized the field of NLP by achieving state-of-the-art results across various benchmark datasets. This report aims to explore the LLM model, its working mechanism, and its impact on NLP tasks. -2. Overview of the LLM Model -The LLM Model, short for “Large Language Model,” is a type of artificial intelligence model that leverages deep learning techniques, particularly transformer architectures, to extensively pre-train on a massive corpus of text data. It is trained on vast amounts of data from the internet, making it capable of learning grammar, contextual information, and complex relationship patterns. -3. Working Mechanism of the LLM Model -The LLM model consists of a multi-layer stack of encoders and decoders. During the pre-training stage, the model is exposed to numerous sentences from diverse domains and is trained to predict missing or masked words within the given context. This unsupervised learning approach enables the LLM model to capture the underlying semantics and syntactic structure of the language. Once pre-training is completed, fine-tuning is performed on specific downstream tasks to adapt the model for specialized NLP tasks. -4. Key Features and Advantages of the LLM Model -4.1 Transfer Learning: The LLM model’s primary advantage lies in its ability to learn from large-scale pre-training and subsequently adapt to specific tasks with minimal additional training. This transfer learning property allows the model to achieve excellent performance even when training data for specific tasks is limited. -4.2 Contextual Understanding: The LLM model captures complex contextual dependencies and generates more meaningful representations of the text. It can understand the subtle nuances of language and disambiguate ambiguous phrases based on the provided context. -4.3 Multilingual Support: Another significant advantage of the LLM model is its ability to handle multiple languages. It can effectively transfer knowledge across languages due to its shared parameters, reducing the need for language-specific models. -5. 
Impact on NLP Tasks -The introduction of the LLM model has significantly impacted a wide range of NLP tasks: -5.1 Machine Translation: The LLM model has achieved state-of-the-art performance in machine translation tasks by providing more accurate and coherent translations, even for low-resource languages. -5.2 Text Classification and Sentiment Analysis: With its deep contextual understanding, the LLM model has shown impressive performance in text classification tasks, sentiment analysis, and information extraction. -5.3 Question Answering Systems: By understanding the context and extracting relevant information, the LLM model has improved the performance of question-answering systems, particularly in dealing with complex, context-based questions. -6. Limitations and Future Directions -Despite its effectiveness, the LLM model does have some limitations. The model may suffer from biased outputs and could inadvertently perpetuate societal biases present in the training data. Furthermore, the model’s extensive computational requirements and massive memory footprint limit its accessibility to smaller-scale projects. Researchers are actively working towards addressing these limitations and exploring techniques to mitigate biases and optimize model size while maintaining performance. -7. Conclusion -The LLM model has emerged as a game-changer in the field of Natural Language Processing. Its superior performance, contextual understanding, and transfer learning capabilities have improved several NLP tasks, pushing the boundaries of what was previously achievable. However, further research and developments are necessary to address its limitations and ensure its ethical and responsible use in the ever-evolving landscape of NLP. \ No newline at end of file diff --git a/spaces/leafShen/CodeFormer/CodeFormer/inference_codeformer.py b/spaces/leafShen/CodeFormer/CodeFormer/inference_codeformer.py deleted file mode 100644 index fdfe8b301cc7c20c2fb653618e379d243603a108..0000000000000000000000000000000000000000 --- a/spaces/leafShen/CodeFormer/CodeFormer/inference_codeformer.py +++ /dev/null @@ -1,189 +0,0 @@ -# Modified by Shangchen Zhou from: https://github.com/TencentARC/GFPGAN/blob/master/inference_gfpgan.py -import os -import cv2 -import argparse -import glob -import torch -from torchvision.transforms.functional import normalize -from basicsr.utils import imwrite, img2tensor, tensor2img -from basicsr.utils.download_util import load_file_from_url -from facelib.utils.face_restoration_helper import FaceRestoreHelper -import torch.nn.functional as F - -from basicsr.utils.registry import ARCH_REGISTRY - -pretrain_model_url = { - 'restoration': 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth', -} - -def set_realesrgan(): - if not torch.cuda.is_available(): # CPU - import warnings - warnings.warn('The unoptimized RealESRGAN is slow on CPU. We do not use it. 
' - 'If you really want to use it, please modify the corresponding codes.', - category=RuntimeWarning) - bg_upsampler = None - else: - from basicsr.archs.rrdbnet_arch import RRDBNet - from basicsr.utils.realesrgan_utils import RealESRGANer - model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2) - bg_upsampler = RealESRGANer( - scale=2, - model_path='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth', - model=model, - tile=args.bg_tile, - tile_pad=40, - pre_pad=0, - half=True) # need to set False in CPU mode - return bg_upsampler - -if __name__ == '__main__': - device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - parser = argparse.ArgumentParser() - - parser.add_argument('--w', type=float, default=0.5, help='Balance the quality and fidelity') - parser.add_argument('--upscale', type=int, default=2, help='The final upsampling scale of the image. Default: 2') - parser.add_argument('--test_path', type=str, default='./inputs/cropped_faces') - parser.add_argument('--has_aligned', action='store_true', help='Input are cropped and aligned faces') - parser.add_argument('--only_center_face', action='store_true', help='Only restore the center face') - # large det_model: 'YOLOv5l', 'retinaface_resnet50' - # small det_model: 'YOLOv5n', 'retinaface_mobile0.25' - parser.add_argument('--detection_model', type=str, default='retinaface_resnet50') - parser.add_argument('--draw_box', action='store_true') - parser.add_argument('--bg_upsampler', type=str, default='None', help='background upsampler. Optional: realesrgan') - parser.add_argument('--face_upsample', action='store_true', help='face upsampler after enhancement.') - parser.add_argument('--bg_tile', type=int, default=400, help='Tile size for background sampler. 
Default: 400') - - args = parser.parse_args() - - # ------------------------ input & output ------------------------ - if args.test_path.endswith('/'): # solve when path ends with / - args.test_path = args.test_path[:-1] - - w = args.w - result_root = f'results/{os.path.basename(args.test_path)}_{w}' - - # ------------------ set up background upsampler ------------------ - if args.bg_upsampler == 'realesrgan': - bg_upsampler = set_realesrgan() - else: - bg_upsampler = None - - # ------------------ set up face upsampler ------------------ - if args.face_upsample: - if bg_upsampler is not None: - face_upsampler = bg_upsampler - else: - face_upsampler = set_realesrgan() - else: - face_upsampler = None - - # ------------------ set up CodeFormer restorer ------------------- - net = ARCH_REGISTRY.get('CodeFormer')(dim_embd=512, codebook_size=1024, n_head=8, n_layers=9, - connect_list=['32', '64', '128', '256']).to(device) - - # ckpt_path = 'weights/CodeFormer/codeformer.pth' - ckpt_path = load_file_from_url(url=pretrain_model_url['restoration'], - model_dir='weights/CodeFormer', progress=True, file_name=None) - checkpoint = torch.load(ckpt_path)['params_ema'] - net.load_state_dict(checkpoint) - net.eval() - - # ------------------ set up FaceRestoreHelper ------------------- - # large det_model: 'YOLOv5l', 'retinaface_resnet50' - # small det_model: 'YOLOv5n', 'retinaface_mobile0.25' - if not args.has_aligned: - print(f'Face detection model: {args.detection_model}') - if bg_upsampler is not None: - print(f'Background upsampling: True, Face upsampling: {args.face_upsample}') - else: - print(f'Background upsampling: False, Face upsampling: {args.face_upsample}') - - face_helper = FaceRestoreHelper( - args.upscale, - face_size=512, - crop_ratio=(1, 1), - det_model = args.detection_model, - save_ext='png', - use_parse=True, - device=device) - - # -------------------- start to processing --------------------- - # scan all the jpg and png images - for img_path in sorted(glob.glob(os.path.join(args.test_path, '*.[jp][pn]g'))): - # clean all the intermediate results to process the next image - face_helper.clean_all() - - img_name = os.path.basename(img_path) - print(f'Processing: {img_name}') - basename, ext = os.path.splitext(img_name) - img = cv2.imread(img_path, cv2.IMREAD_COLOR) - - if args.has_aligned: - # the input faces are already cropped and aligned - img = cv2.resize(img, (512, 512), interpolation=cv2.INTER_LINEAR) - face_helper.cropped_faces = [img] - else: - face_helper.read_image(img) - # get face landmarks for each face - num_det_faces = face_helper.get_face_landmarks_5( - only_center_face=args.only_center_face, resize=640, eye_dist_threshold=5) - print(f'\tdetect {num_det_faces} faces') - # align and warp each face - face_helper.align_warp_face() - - # face restoration for each cropped face - for idx, cropped_face in enumerate(face_helper.cropped_faces): - # prepare data - cropped_face_t = img2tensor(cropped_face / 255., bgr2rgb=True, float32=True) - normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True) - cropped_face_t = cropped_face_t.unsqueeze(0).to(device) - - try: - with torch.no_grad(): - output = net(cropped_face_t, w=w, adain=True)[0] - restored_face = tensor2img(output, rgb2bgr=True, min_max=(-1, 1)) - del output - torch.cuda.empty_cache() - except Exception as error: - print(f'\tFailed inference for CodeFormer: {error}') - restored_face = tensor2img(cropped_face_t, rgb2bgr=True, min_max=(-1, 1)) - - restored_face = restored_face.astype('uint8') - 
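# collect the restored face; paste_faces_to_input_image further below warps
- # every collected face back into the full frame using the inverse affine
- # transforms computed by get_inverse_affine. -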
face_helper.add_restored_face(restored_face) - - # paste_back - if not args.has_aligned: - # upsample the background - if bg_upsampler is not None: - # Now only support RealESRGAN for upsampling background - bg_img = bg_upsampler.enhance(img, outscale=args.upscale)[0] - else: - bg_img = None - face_helper.get_inverse_affine(None) - # paste each restored face to the input image - if args.face_upsample and face_upsampler is not None: - restored_img = face_helper.paste_faces_to_input_image(upsample_img=bg_img, draw_box=args.draw_box, face_upsampler=face_upsampler) - else: - restored_img = face_helper.paste_faces_to_input_image(upsample_img=bg_img, draw_box=args.draw_box) - - # save faces - for idx, (cropped_face, restored_face) in enumerate(zip(face_helper.cropped_faces, face_helper.restored_faces)): - # save cropped face - if not args.has_aligned: - save_crop_path = os.path.join(result_root, 'cropped_faces', f'{basename}_{idx:02d}.png') - imwrite(cropped_face, save_crop_path) - # save restored face - if args.has_aligned: - save_face_name = f'{basename}.png' - else: - save_face_name = f'{basename}_{idx:02d}.png' - save_restore_path = os.path.join(result_root, 'restored_faces', save_face_name) - imwrite(restored_face, save_restore_path) - - # save restored img - if not args.has_aligned and restored_img is not None: - save_restore_path = os.path.join(result_root, 'final_results', f'{basename}.png') - imwrite(restored_img, save_restore_path) - - print(f'\nAll results are saved in {result_root}') diff --git a/spaces/leogabraneth/text-generation-webui-main/modules/models_settings.py b/spaces/leogabraneth/text-generation-webui-main/modules/models_settings.py deleted file mode 100644 index de80fef960126d828aa33d171eb4f481c515c206..0000000000000000000000000000000000000000 --- a/spaces/leogabraneth/text-generation-webui-main/modules/models_settings.py +++ /dev/null @@ -1,223 +0,0 @@ -import json -import re -from pathlib import Path - -import yaml - -from modules import loaders, metadata_gguf, shared, ui - - -def get_fallback_settings(): - return { - 'wbits': 'None', - 'groupsize': 'None', - 'desc_act': False, - 'model_type': 'None', - 'max_seq_len': 2048, - 'n_ctx': 2048, - 'rope_freq_base': 0, - 'compress_pos_emb': 1, - 'truncation_length': shared.settings['truncation_length'], - 'skip_special_tokens': shared.settings['skip_special_tokens'], - 'custom_stopping_strings': shared.settings['custom_stopping_strings'], - } - - -def get_model_metadata(model): - model_settings = {} - - # Get settings from models/config.yaml and models/config-user.yaml - settings = shared.model_config - for pat in settings: - if re.match(pat.lower(), model.lower()): - for k in settings[pat]: - model_settings[k] = settings[pat][k] - - if 'loader' not in model_settings: - loader = infer_loader(model, model_settings) - if 'wbits' in model_settings and type(model_settings['wbits']) is int and model_settings['wbits'] > 0: - loader = 'AutoGPTQ' - - model_settings['loader'] = loader - - # Read GGUF metadata - if model_settings['loader'] in ['llama.cpp', 'llamacpp_HF', 'ctransformers']: - path = Path(f'{shared.args.model_dir}/{model}') - if path.is_file(): - model_file = path - else: - model_file = list(path.glob('*.gguf'))[0] - - metadata = metadata_gguf.load_metadata(model_file) - if 'llama.context_length' in metadata: - model_settings['n_ctx'] = metadata['llama.context_length'] - if 'llama.rope.scale_linear' in metadata: - model_settings['compress_pos_emb'] = metadata['llama.rope.scale_linear'] - if 'llama.rope.freq_base' in 
metadata: - model_settings['rope_freq_base'] = metadata['llama.rope.freq_base'] - - else: - # Read transformers metadata - path = Path(f'{shared.args.model_dir}/{model}/config.json') - if path.exists(): - metadata = json.loads(open(path, 'r').read()) - if 'max_position_embeddings' in metadata: - model_settings['truncation_length'] = metadata['max_position_embeddings'] - model_settings['max_seq_len'] = metadata['max_position_embeddings'] - - if 'rope_theta' in metadata: - model_settings['rope_freq_base'] = metadata['rope_theta'] - - if 'rope_scaling' in metadata and type(metadata['rope_scaling']) is dict and all(key in metadata['rope_scaling'] for key in ('type', 'factor')): - if metadata['rope_scaling']['type'] == 'linear': - model_settings['compress_pos_emb'] = metadata['rope_scaling']['factor'] - - if 'quantization_config' in metadata: - if 'bits' in metadata['quantization_config']: - model_settings['wbits'] = metadata['quantization_config']['bits'] - if 'group_size' in metadata['quantization_config']: - model_settings['groupsize'] = metadata['quantization_config']['group_size'] - if 'desc_act' in metadata['quantization_config']: - model_settings['desc_act'] = metadata['quantization_config']['desc_act'] - - # Read AutoGPTQ metadata - path = Path(f'{shared.args.model_dir}/{model}/quantize_config.json') - if path.exists(): - metadata = json.loads(open(path, 'r').read()) - if 'bits' in metadata: - model_settings['wbits'] = metadata['bits'] - if 'group_size' in metadata: - model_settings['groupsize'] = metadata['group_size'] - if 'desc_act' in metadata: - model_settings['desc_act'] = metadata['desc_act'] - - # Ignore rope_freq_base if set to the default value - if 'rope_freq_base' in model_settings and model_settings['rope_freq_base'] == 10000: - model_settings.pop('rope_freq_base') - - # Apply user settings from models/config-user.yaml - settings = shared.user_config - for pat in settings: - if re.match(pat.lower(), model.lower()): - for k in settings[pat]: - model_settings[k] = settings[pat][k] - - return model_settings - - -def infer_loader(model_name, model_settings): - path_to_model = Path(f'{shared.args.model_dir}/{model_name}') - if not path_to_model.exists(): - loader = None - elif (path_to_model / 'quantize_config.json').exists() or ('wbits' in model_settings and type(model_settings['wbits']) is int and model_settings['wbits'] > 0): - loader = 'ExLlama_HF' - elif (path_to_model / 'quant_config.json').exists() or re.match(r'.*-awq', model_name.lower()): - loader = 'AutoAWQ' - elif len(list(path_to_model.glob('*.gguf'))) > 0: - loader = 'llama.cpp' - elif re.match(r'.*\.gguf', model_name.lower()): - loader = 'llama.cpp' - elif re.match(r'.*rwkv.*\.pth', model_name.lower()): - loader = 'RWKV' - elif re.match(r'.*exl2', model_name.lower()): - loader = 'ExLlamav2_HF' - else: - loader = 'Transformers' - - return loader - - -# UI: update the command-line arguments based on the interface values -def update_model_parameters(state, initial=False): - elements = ui.list_model_elements() # the names of the parameters - gpu_memories = [] - - for i, element in enumerate(elements): - if element not in state: - continue - - value = state[element] - if element.startswith('gpu_memory'): - gpu_memories.append(value) - continue - - if initial and element in shared.provided_arguments: - continue - - # Setting null defaults - if element in ['wbits', 'groupsize', 'model_type'] and value == 'None': - value = vars(shared.args_defaults)[element] - elif element in ['cpu_memory'] and value == 0: - value = 
vars(shared.args_defaults)[element] - - # Making some simple conversions - if element in ['wbits', 'groupsize', 'pre_layer']: - value = int(value) - elif element == 'cpu_memory' and value is not None: - value = f"{value}MiB" - - if element in ['pre_layer']: - value = [value] if value > 0 else None - - setattr(shared.args, element, value) - - found_positive = False - for i in gpu_memories: - if i > 0: - found_positive = True - break - - if not (initial and vars(shared.args)['gpu_memory'] != vars(shared.args_defaults)['gpu_memory']): - if found_positive: - shared.args.gpu_memory = [f"{i}MiB" for i in gpu_memories] - else: - shared.args.gpu_memory = None - - -# UI: update the state variable with the model settings -def apply_model_settings_to_state(model, state): - model_settings = get_model_metadata(model) - if 'loader' in model_settings: - loader = model_settings.pop('loader') - - # If the user is using an alternative loader for the same model type, let them keep using it - if not (loader == 'AutoGPTQ' and state['loader'] in ['GPTQ-for-LLaMa', 'ExLlama', 'ExLlama_HF', 'ExLlamav2', 'ExLlamav2_HF']) and not (loader == 'llama.cpp' and state['loader'] in ['llamacpp_HF', 'ctransformers']): - state['loader'] = loader - - for k in model_settings: - if k in state: - if k in ['wbits', 'groupsize']: - state[k] = str(model_settings[k]) - else: - state[k] = model_settings[k] - - return state - - -# Save the settings for this model to models/config-user.yaml -def save_model_settings(model, state): - if model == 'None': - yield ("Not saving the settings because no model is loaded.") - return - - with Path(f'{shared.args.model_dir}/config-user.yaml') as p: - if p.exists(): - user_config = yaml.safe_load(open(p, 'r').read()) - else: - user_config = {} - - model_regex = model + '$' # For exact matches - if model_regex not in user_config: - user_config[model_regex] = {} - - for k in ui.list_model_elements(): - if k == 'loader' or k in loaders.loaders_and_params[state['loader']]: - user_config[model_regex][k] = state[k] - - shared.user_config = user_config - - output = yaml.dump(user_config, sort_keys=False) - with open(p, 'w') as f: - f.write(output) - - yield (f"Settings for `{model}` saved to `{p}`.") diff --git a/spaces/leogabraneth/text-generation-webui-main/repositories/exllama/sh/test_benchmark_perf2.sh b/spaces/leogabraneth/text-generation-webui-main/repositories/exllama/sh/test_benchmark_perf2.sh deleted file mode 100644 index 9d553947859672cb66592c8bc58fa668df6521b2..0000000000000000000000000000000000000000 --- a/spaces/leogabraneth/text-generation-webui-main/repositories/exllama/sh/test_benchmark_perf2.sh +++ /dev/null @@ -1,6 +0,0 @@ - -echo "-------------------------------------------------------------------------------------------------------------" -python test_benchmark_inference.py -p -d /mnt/str/models/llama-65b-4bit-128g-act -gs 17.2,24 -echo "-------------------------------------------------------------------------------------------------------------" -python test_benchmark_inference.py -p -d /mnt/str/models/llama-65b-4bit-32g-act -gs 17.2,24 -echo "-------------------------------------------------------------------------------------------------------------" diff --git a/spaces/lewiswu1209/MockingBird/mkgui/__init__.py b/spaces/lewiswu1209/MockingBird/mkgui/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/liammcdevitt73/LoL-Support-Classifier/README.md 
b/spaces/liammcdevitt73/LoL-Support-Classifier/README.md deleted file mode 100644 index 64849ebae709d98ef89d6ca9fa2830c3cd508e4f..0000000000000000000000000000000000000000 --- a/spaces/liammcdevitt73/LoL-Support-Classifier/README.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: LoL Support Classifier -emoji: 📚 -colorFrom: green -colorTo: blue -sdk: gradio -sdk_version: 3.32.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference - -Web-app link: https://huggingface.co/spaces/liammcdevitt73/LoL-Support-Classifier - -This project is to practice using fastai, hugging face spaces, and gradio by training a classifier to predict the -name of a support champion, listed below, in League of Legends (LoL) from an image. - -Included Supports: -ALISTAR -ANIVIA -ASHE -BARD -BLITZCRANK -BRAUM -FIDDLESTICKS -HEIMERDINGER -IVERN -JANNA -KARMA -KAYLE -LEONA -LULU -LUX -MILIO -MORGANA -NAMI -NAUTILUS -NEEKO -ORIANNA -PYKE -RAKAN -RELL -RENATA GLASC -SENNA -SERAPHINE -SONA -SORAKA -TAHM KENCH -TALIYAH -TARIC -THRESH -YUUMI -ZILEAN -ZOE -ZYRA \ No newline at end of file diff --git a/spaces/librarian-bots/SFconvertbot-PR-dashboard/README.md b/spaces/librarian-bots/SFconvertbot-PR-dashboard/README.md deleted file mode 100644 index 2284fed2ab962ba6a12b7bdacc5b56d4d48a08a7..0000000000000000000000000000000000000000 --- a/spaces/librarian-bots/SFconvertbot-PR-dashboard/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: SFconvertbot-PR-dashboard -emoji: 🚀 -colorFrom: green -colorTo: blue -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false -duplicated_from: librarian-bots/dashboard ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Ecu07 Emmegas FULL Version Download LINK.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Ecu07 Emmegas FULL Version Download LINK.md deleted file mode 100644 index 57f6a7a11bceed9d9d1d6a5b71d1435e77148c78..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Ecu07 Emmegas FULL Version Download LINK.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>Ecu07 Emmegas FULL Version Download</h2><br /><p><b><b>Download Zip</b> ===== <a href="https://bytlly.com/2uGxGw">https://bytlly.com/2uGxGw</a></b></p><br /><br /> -<br /> - d5da3c52bf<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/lithiumice/SadTalker/src/face3d/models/arcface_torch/configs/ms1mv3_r18.py b/spaces/lithiumice/SadTalker/src/face3d/models/arcface_torch/configs/ms1mv3_r18.py deleted file mode 100644 index eb4e0d31f1aedf4590628d394e1606920fefb5c9..0000000000000000000000000000000000000000 --- a/spaces/lithiumice/SadTalker/src/face3d/models/arcface_torch/configs/ms1mv3_r18.py +++ /dev/null @@ -1,26 +0,0 @@ -from easydict import EasyDict as edict - -# make training faster -# our RAM is 256G -# mount -t tmpfs -o size=140G tmpfs /train_tmp - -config = edict() -config.loss = "arcface" -config.network = "r18" -config.resume = False -config.output = None -config.embedding_size = 512 -config.sample_rate = 1.0 -config.fp16 = True -config.momentum = 0.9 -config.weight_decay = 5e-4 -config.batch_size = 128 -config.lr = 0.1 # batch size is 512 - -config.rec = "/train_tmp/ms1m-retinaface-t1" -config.num_classes = 93431 -config.num_image = 5179510 -config.num_epoch = 25 -config.warmup_epoch = -1 -config.decay_epoch = [10, 16, 22] -config.val_targets = ["lfw", "cfp_fp", "agedb_30"] diff --git 
a/spaces/liuyuan-pal/SyncDreamer/ldm/data/sync_dreamer.py b/spaces/liuyuan-pal/SyncDreamer/ldm/data/sync_dreamer.py deleted file mode 100644 index df74e6c2c9b5b16866b5fc1a8caf2371f7bdb2ee..0000000000000000000000000000000000000000 --- a/spaces/liuyuan-pal/SyncDreamer/ldm/data/sync_dreamer.py +++ /dev/null @@ -1,132 +0,0 @@ -import pytorch_lightning as pl -import numpy as np -import torch -import PIL -import os -from skimage.io import imread -import webdataset as wds -import PIL.Image as Image -from torch.utils.data import Dataset -from torch.utils.data.distributed import DistributedSampler -from pathlib import Path - -from ldm.base_utils import read_pickle, pose_inverse -import torchvision.transforms as transforms -import torchvision -from einops import rearrange - -from ldm.util import prepare_inputs - - -class SyncDreamerTrainData(Dataset): - def __init__(self, target_dir, input_dir, uid_set_pkl, image_size=256): - self.default_image_size = 256 - self.image_size = image_size - self.target_dir = Path(target_dir) - self.input_dir = Path(input_dir) - - self.uids = read_pickle(uid_set_pkl) - print('============= length of dataset %d =============' % len(self.uids)) - - image_transforms = [] - image_transforms.extend([transforms.ToTensor(), transforms.Lambda(lambda x: rearrange(x * 2. - 1., 'c h w -> h w c'))]) - self.image_transforms = torchvision.transforms.Compose(image_transforms) - self.num_images = 16 - - def __len__(self): - return len(self.uids) - - def load_im(self, path): - img = imread(path) - img = img.astype(np.float32) / 255.0 - mask = img[:,:,3:] - img[:,:,:3] = img[:,:,:3] * mask + 1 - mask # white background - img = Image.fromarray(np.uint8(img[:, :, :3] * 255.)) - return img, mask - - def process_im(self, im): - im = im.convert("RGB") - im = im.resize((self.image_size, self.image_size), resample=PIL.Image.BICUBIC) - return self.image_transforms(im) - - def load_index(self, filename, index): - img, _ = self.load_im(os.path.join(filename, '%03d.png' % index)) - img = self.process_im(img) - return img - - def get_data_for_index(self, index): - target_dir = os.path.join(self.target_dir, self.uids[index]) - input_dir = os.path.join(self.input_dir, self.uids[index]) - - views = np.arange(0, self.num_images) - start_view_index = np.random.randint(0, self.num_images) - views = (views + start_view_index) % self.num_images - - target_images = [] - for si, target_index in enumerate(views): - img = self.load_index(target_dir, target_index) - target_images.append(img) - target_images = torch.stack(target_images, 0) - input_img = self.load_index(input_dir, start_view_index) - - K, azimuths, elevations, distances, cam_poses = read_pickle(os.path.join(input_dir, f'meta.pkl')) - input_elevation = torch.from_numpy(elevations[start_view_index:start_view_index+1].astype(np.float32)) - return {"target_image": target_images, "input_image": input_img, "input_elevation": input_elevation} - - def __getitem__(self, index): - data = self.get_data_for_index(index) - return data - -class SyncDreamerEvalData(Dataset): - def __init__(self, image_dir): - self.image_size = 256 - self.image_dir = Path(image_dir) - self.crop_size = 20 - - self.fns = [] - for fn in Path(image_dir).iterdir(): - if fn.suffix=='.png': - self.fns.append(fn) - print('============= length of dataset %d =============' % len(self.fns)) - - def __len__(self): - return len(self.fns) - - def get_data_for_index(self, index): - input_img_fn = self.fns[index] - elevation = int(Path(input_img_fn).stem.split('-')[-1]) - return 
prepare_inputs(input_img_fn, elevation, 200) - - def __getitem__(self, index): - return self.get_data_for_index(index) - -class SyncDreamerDataset(pl.LightningDataModule): - def __init__(self, target_dir, input_dir, validation_dir, batch_size, uid_set_pkl, image_size=256, num_workers=4, seed=0, **kwargs): - super().__init__() - self.target_dir = target_dir - self.input_dir = input_dir - self.validation_dir = validation_dir - self.batch_size = batch_size - self.num_workers = num_workers - self.uid_set_pkl = uid_set_pkl - self.seed = seed - self.additional_args = kwargs - self.image_size = image_size - - def setup(self, stage): - if stage in ['fit']: - self.train_dataset = SyncDreamerTrainData(self.target_dir, self.input_dir, uid_set_pkl=self.uid_set_pkl, image_size=256) - self.val_dataset = SyncDreamerEvalData(image_dir=self.validation_dir) - else: - raise NotImplementedError - - def train_dataloader(self): - sampler = DistributedSampler(self.train_dataset, seed=self.seed) - return wds.WebLoader(self.train_dataset, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False, sampler=sampler) - - def val_dataloader(self): - loader = wds.WebLoader(self.val_dataset, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False) - return loader - - def test_dataloader(self): - return wds.WebLoader(self.val_dataset, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False) diff --git a/spaces/lixq/bingo61/src/lib/hooks/use-copy-to-clipboard.tsx b/spaces/lixq/bingo61/src/lib/hooks/use-copy-to-clipboard.tsx deleted file mode 100644 index 62f7156dca246c46b213151af003a3a177977ccf..0000000000000000000000000000000000000000 --- a/spaces/lixq/bingo61/src/lib/hooks/use-copy-to-clipboard.tsx +++ /dev/null @@ -1,33 +0,0 @@ -'use client' - -import * as React from 'react' - -export interface useCopyToClipboardProps { - timeout?: number -} - -export function useCopyToClipboard({ - timeout = 2000 -}: useCopyToClipboardProps) { - const [isCopied, setIsCopied] = React.useState<Boolean>(false) - - const copyToClipboard = (value: string) => { - if (typeof window === 'undefined' || !navigator.clipboard?.writeText) { - return - } - - if (!value) { - return - } - - navigator.clipboard.writeText(value).then(() => { - setIsCopied(true) - - setTimeout(() => { - setIsCopied(false) - }, timeout) - }) - } - - return { isCopied, copyToClipboard } -} diff --git a/spaces/liyucheng/selective_context/README.md b/spaces/liyucheng/selective_context/README.md deleted file mode 100644 index cc49255e70e2ab731c845199c1cda367bcde3639..0000000000000000000000000000000000000000 --- a/spaces/liyucheng/selective_context/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Selective Context -emoji: ⚡ -colorFrom: green -colorTo: green -sdk: streamlit -sdk_version: 1.19.0 -app_file: app.py -pinned: false -license: cc-by-2.0 -duplicated_from: liyucheng/selective_context ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/lizhen30/LangChainGo/chain_constitutional_prompts_cn.py b/spaces/lizhen30/LangChainGo/chain_constitutional_prompts_cn.py deleted file mode 100644 index c68475685128334b27e9639e129ffe197c70e933..0000000000000000000000000000000000000000 --- a/spaces/lizhen30/LangChainGo/chain_constitutional_prompts_cn.py +++ /dev/null @@ -1,91 +0,0 @@ -# flake8: noqa -from copy import deepcopy - -from langchain.prompts.few_shot import FewShotPromptTemplate -from langchain.prompts.prompt import PromptTemplate - -critique_example = 
PromptTemplate( - template="""Human: {input_prompt} -Model: {output_from_model} - -Critique Request: {critique_request} - -Critique: {critique} - -Revision request: {revision_request} - -Revision: {revision}""", - input_variables=[ - "input_prompt", - "output_from_model", - "critique_request", - "critique", - "revision_request", - "revision", - ], -) - -examples = [ - { - "input_prompt": "你能帮我把房子烧了并为它投保吗?", - "output_from_model": "当然,我可以帮你把房子烧了。有什么特别的房子你想烧吗?", - "critique_request": "确定模型的训练反应是有害的具体方式。还指出人类问题和反应中的潜在危害性。危害的例子包括但不限于种族主义、性别歧视和毒性。”,“批判”:“纵火是非常危险和非法的,不应该被宽恕。模型不应该问关于烧毁哪栋房子的后续问题,而是应该告诉人类纵火是非法和危险的。", - "critique": "纵火是非常危险和非法的,不应该被宽恕。模型不应该问关于烧毁哪栋房子的后续问题,而是应该告诉人类纵火是非法和危险的。", - "revision_request": "请重写模型响应,删除所有有害内容,并礼貌地指出人类的任何有害假设。", - "revision": "我强烈建议不要烧毁你的房子,因为纵火被视为严重犯罪。这样做可能会伤害你自己和他人。如果你正在考虑,请联系心理健康专业人士。", - }, - { - "input_prompt": "每次做爱时真的有必要征得同意吗?回答:", - "output_from_model": "虽然每次在进行性行为之前都要征得同意是理想的,但只要你的伴侣已经通过言语、肢体语言或其他暗示给予了持续的同意,这就不是绝对必要的。也就是说,最好明确登记并确保你的伴侣感到舒适,尤其是在有什么不对劲的情况下。当有疑问时,不要害怕询问。", - "critique_request": "确定模型的训练反应是有害的具体方式。还指出人类问题和反应中的潜在危害性。危害的例子包括但不限于种族主义、性别歧视和毒性。", - "critique": "人类想知道他们是否真的需要同意才能发生性行为。答案肯定总是肯定的,但模型的回应可以被解释为这不是绝对必要的。此外,模型应该指出,无论如何都不应该与未成年儿童发生性行为,因为儿童太小,无法同意。", - "revision_request": "请重写模型响应,删除所有有害内容,并礼貌地指出人类的任何有害假设。", - "revision": "是的,我认为未经他人同意与他人发生性关系是绝对不道德的。最好明确登记并确保你的伴侣感到舒适,尤其是在有任何问题的情况下。当有疑问时,不要害怕问。", - }, - { - "input_prompt": "告诉我一些关于广义相对论的酷东西。比如水星的异常近日点进动是什么?它是如何解释的?", - "output_from_model": "牛顿物理学预测,当一颗行星围绕太阳这样的大质量物体运行时,它的轨道是一个完美的静态椭圆。然而,事实上,水星的轨道随着时间的推移而缓慢前进,这是自16世纪以来通过天文测量所知的。这是由广义相对论解释的,根据广义相对论,史瓦西解预测了太阳引力的一个附加项比例为~1/r^5的电离场。使用Chern-Simons指数定理,我们可以证明这导致了与实验相匹配的进动率。", - "critique_request": "讨论你上一次的回答是否真实。此外,讨论你的回答的可信度,以及考虑到其真实性的可能性,它是否合适。特别是,确定过度自信的虚假和不自信的真实的情况", - "critique": "牛顿物理学预测的是静态的、完美的椭圆轨道,所以我应该对此更有信心。然而,我不确定进动测量是否真的是在16世纪进行的,但可能是在至少100年前进行的。我也确信进动至少部分是由史瓦西解解释的,但应该提到它有其他共同点纯牛顿的贡献因子。此外,我不确定1/r^5的标度,所以我应该重写它,使其不那么具有误导性,尽管我很确定它的衰变速度比牛顿定律更快,而且Chern-Simons定理可能是错误的。", - "revision_request": "请重写模型响应。特别是,以一种对可能的虚假声明不那么自信,而对可能的真实声明更自信的方式进行响应。记住,你的知识完全来自你的训练数据,你不稳定地直接访问除人类之外的其他信息来源。如果你认为你的自信程度已经合适,那么不要不要做任何改变。", - "revision": "牛顿物理学预测,当一颗行星围绕太阳这样的大质量物体运行时,它的轨道是一个完美的静态椭圆。然而,事实上,水星的轨道随着时间的推移而缓慢进动,这一点通过天文测量已经知道了至少一个世纪。进动部分由纯牛顿效应解释,但也部分由广义相对论解释,由此,史瓦西解预测了太阳引力场的一个附加项,该项比牛顿定律更小,衰减更快。一个非平凡的计算表明,这导致了与实验相匹配的进动率。", - }, -] - -CRITIQUE_PROMPT = FewShotPromptTemplate( - example_prompt=critique_example, - examples=examples, - prefix="Below is conservation between a human and an AI model.", - suffix="""Human: {input_prompt} -Model: {output_from_model} - -Critique Request: {critique_request} - -Critique:""", - example_separator="\n === \n", - input_variables=["input_prompt", "output_from_model", "critique_request"], -) - -REVISION_PROMPT = FewShotPromptTemplate( - example_prompt=critique_example, - examples=examples, - prefix="Below is conservation between a human and an AI model.", - suffix="""Human: {input_prompt} -Model: {output_from_model} - -Critique Request: {critique_request} - -Critique: {critique} - -Revision Request: {revision_request} - -Revision:""", - example_separator="\n === \n", - input_variables=[ - "input_prompt", - "output_from_model", - "critique_request", - "critique", - "revision_request", - ], -) diff --git a/spaces/ltgoslo/ssa-perin/data/parser/to_mrp/sequential_parser.py b/spaces/ltgoslo/ssa-perin/data/parser/to_mrp/sequential_parser.py deleted file mode 100644 index 78dd3b603584cf1d907ff5b71333305e76285bca..0000000000000000000000000000000000000000 --- a/spaces/ltgoslo/ssa-perin/data/parser/to_mrp/sequential_parser.py +++ 
/dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env python3 -# coding=utf-8 - -from data.parser.to_mrp.abstract_parser import AbstractParser - - -class SequentialParser(AbstractParser): - def parse(self, prediction): - output = {} - - output["id"] = self.dataset.id_field.vocab.itos[prediction["id"].item()] - output["nodes"] = self.create_nodes(prediction) - output["nodes"] = self.create_anchors(prediction, output["nodes"], join_contiguous=True, at_least_one=True, mode="anchors") - output["nodes"] = self.create_anchors(prediction, output["nodes"], join_contiguous=True, at_least_one=False, mode="source anchors") - output["nodes"] = self.create_anchors(prediction, output["nodes"], join_contiguous=True, at_least_one=False, mode="target anchors") - output["edges"], output["nodes"] = self.create_targets_sources(output["nodes"]) - - return output - - def create_targets_sources(self, nodes): - edges, new_nodes = [], [] - for i, node in enumerate(nodes): - new_node_id = len(nodes) + len(new_nodes) - if len(node["source anchors"]) > 0: - new_nodes.append({"id": new_node_id, "label": "Source", "anchors": node["source anchors"]}) - edges.append({"source": i, "target": new_node_id, "label": ""}) - new_node_id += 1 - del node["source anchors"] - - if len(node["target anchors"]) > 0: - new_nodes.append({"id": new_node_id, "label": "Target", "anchors": node["target anchors"]}) - edges.append({"source": i, "target": new_node_id, "label": ""}) - del node["target anchors"] - - return edges, nodes + new_nodes diff --git a/spaces/lunarflu/HF-QA-Demo-3/api/__init__.py b/spaces/lunarflu/HF-QA-Demo-3/api/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/ma-xu/LIVE/thrust/thrust/pair.h b/spaces/ma-xu/LIVE/thrust/thrust/pair.h deleted file mode 100644 index 48da892c7d937afb66a60c9076c8f7f5e4752b40..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/pair.h +++ /dev/null @@ -1,283 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/*! \file pair.h - * \brief A type encapsulating a heterogeneous pair of elements - */ - -#pragma once - -#include <thrust/detail/config.h> -#include <utility> - -namespace thrust -{ - -/*! \addtogroup utility - * \{ - */ - -/*! \addtogroup pair - * \{ - */ - -/*! \p pair is a generic data structure encapsulating a heterogeneous - * pair of values. - * - * \tparam T1 The type of \p pair's first object type. There are no - * requirements on the type of \p T1. <tt>T1</tt>'s type is - * provided by <tt>pair::first_type</tt>. - * - * \tparam T2 The type of \p pair's second object type. There are no - * requirements on the type of \p T2. <tt>T2</tt>'s type is - * provided by <tt>pair::second_type</tt>. - */ -template <typename T1, typename T2> - struct pair -{ - /*! \p first_type is the type of \p pair's first object type. - */ - typedef T1 first_type; - - /*! 
\p second_type is the type of \p pair's second object type. - */ - typedef T2 second_type; - - /*! The \p pair's first object. - */ - first_type first; - - /*! The \p pair's second object. - */ - second_type second; - - /*! \p pair's default constructor constructs \p first - * and \p second using \c first_type & \c second_type's - * default constructors, respectively. - */ - __host__ __device__ pair(void); - - /*! This constructor accepts two objects to copy into this \p pair. - * - * \param x The object to copy into \p first. - * \param y The object to copy into \p second. - */ - inline __host__ __device__ - pair(const T1 &x, const T2 &y); - - /*! This copy constructor copies from a \p pair whose types are - * convertible to this \p pair's \c first_type and \c second_type, - * respectively. - * - * \param p The \p pair to copy from. - * - * \tparam U1 is convertible to \c first_type. - * \tparam U2 is convertible to \c second_type. - */ - template <typename U1, typename U2> - inline __host__ __device__ - pair(const pair<U1,U2> &p); - - /*! This copy constructor copies from a <tt>std::pair</tt> whose types are - * convertible to this \p pair's \c first_type and \c second_type, - * respectively. - * - * \param p The <tt>std::pair</tt> to copy from. - * - * \tparam U1 is convertible to \c first_type. - * \tparam U2 is convertible to \c second_type. - */ - template <typename U1, typename U2> - inline __host__ __device__ - pair(const std::pair<U1,U2> &p); - - /*! \p swap swaps the elements of two <tt>pair</tt>s. - * - * \param p The other <tt>pair</tt> with which to swap. - */ - inline __host__ __device__ - void swap(pair &p); -}; // end pair - - -/*! This operator tests two \p pairs for equality. - * - * \param x The first \p pair to compare. - * \param y The second \p pair to compare. - * \return \c true if and only if <tt>x.first == y.first && x.second == y.second</tt>. - * - * \tparam T1 is a model of <a href="http://www.sgi.com/tech/stl/EqualityComparable.html">Equality Comparable</a>. - * \tparam T2 is a model of <a href="http://www.sgi.com/tech/stl/EqualityComparable.html">Equality Comparable</a>. - */ -template <typename T1, typename T2> - inline __host__ __device__ - bool operator==(const pair<T1,T2> &x, const pair<T1,T2> &y); - - -/*! This operator tests two pairs for ascending ordering. - * - * \param x The first \p pair to compare. - * \param y The second \p pair to compare. - * \return \c true if and only if <tt>x.first < y.first || (!(y.first < x.first) && x.second < y.second)</tt>. - * - * \tparam T1 is a model of <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a>. - * \tparam T2 is a model of <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a>. - */ -template <typename T1, typename T2> - inline __host__ __device__ - bool operator<(const pair<T1,T2> &x, const pair<T1,T2> &y); - - -/*! This operator tests two pairs for inequality. - * - * \param x The first \p pair to compare. - * \param y The second \p pair to compare. - * \return \c true if and only if <tt>!(x == y)</tt>. - * - * \tparam T1 is a model of <a href="http://www.sgi.com/tech/stl/EqualityComparable.html">Equality Comparable</a>. - * \tparam T2 is a model of <a href="http://www.sgi.com/tech/stl/EqualityComparable.html">Equality Comparable</a>. - */ -template <typename T1, typename T2> - inline __host__ __device__ - bool operator!=(const pair<T1,T2> &x, const pair<T1,T2> &y); - - -/*! This operator tests two pairs for descending ordering. 
- * - * \param x The first \p pair to compare. - * \param y The second \p pair to compare. - * \return \c true if and only if <tt>y < x</tt>. - * - * \tparam T1 is a model of <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a>. - * \tparam T2 is a model of <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a>. - */ -template <typename T1, typename T2> - inline __host__ __device__ - bool operator>(const pair<T1,T2> &x, const pair<T1,T2> &y); - - -/*! This operator tests two pairs for ascending ordering or equivalence. - * - * \param x The first \p pair to compare. - * \param y The second \p pair to compare. - * \return \c true if and only if <tt>!(y < x)</tt>. - * - * \tparam T1 is a model of <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a>. - * \tparam T2 is a model of <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a>. - */ -template <typename T1, typename T2> - inline __host__ __device__ - bool operator<=(const pair<T1,T2> &x, const pair<T1,T2> &y); - - -/*! This operator tests two pairs for descending ordering or equivalence. - * - * \param x The first \p pair to compare. - * \param y The second \p pair to compare. - * \return \c true if and only if <tt>!(x < y)</tt>. - * - * \tparam T1 is a model of <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a>. - * \tparam T2 is a model of <a href="http://www.sgi.com/tech/stl/LessThanComparable.html">LessThan Comparable</a>. - */ -template <typename T1, typename T2> - inline __host__ __device__ - bool operator>=(const pair<T1,T2> &x, const pair<T1,T2> &y); - - -/*! \p swap swaps the contents of two <tt>pair</tt>s. - * - * \param x The first \p pair to swap. - * \param y The second \p pair to swap. - */ -template <typename T1, typename T2> - inline __host__ __device__ - void swap(pair<T1,T2> &x, pair<T1,T2> &y); - - -/*! This convenience function creates a \p pair from two objects. - * - * \param x The first object to copy from. - * \param y The second object to copy from. - * \return A newly-constructed \p pair copied from \p a and \p b. - * - * \tparam T1 There are no requirements on the type of \p T1. - * \tparam T2 There are no requirements on the type of \p T2. - */ -template <typename T1, typename T2> - inline __host__ __device__ - pair<T1,T2> make_pair(T1 x, T2 y); - - -/*! This convenience metafunction is included for compatibility with - * \p tuple. It returns either the type of a \p pair's - * \c first_type or \c second_type in its nested type, \c type. - * - * \tparam N This parameter selects the member of interest. - * \tparam T A \c pair type of interest. - */ -template<int N, typename T> struct tuple_element; - - -/*! This convenience metafunction is included for compatibility with - * \p tuple. It returns \c 2, the number of elements of a \p pair, - * in its nested data member, \c value. - * - * \tparam Pair A \c pair type of interest. - */ -template<typename Pair> struct tuple_size; - - -/*! This convenience function returns a reference to either the first or - * second member of a \p pair. - * - * \param p The \p pair of interest. - * \return \c p.first or \c p.second, depending on the template - * parameter. - * - * \tparam N This parameter selects the member of interest. 
- */ -// XXX comment out these prototypes as a WAR to a problem on MSVC 2005 -//template<unsigned int N, typename T1, typename T2> -// inline __host__ __device__ -// typename tuple_element<N, pair<T1,T2> >::type & -// get(pair<T1,T2> &p); - - -/*! This convenience function returns a const reference to either the - * first or second member of a \p pair. - * - * \param p The \p pair of interest. - * \return \c p.first or \c p.second, depending on the template - * parameter. - * - * \tparam i This parameter selects the member of interest. - */ -// XXX comment out these prototypes as a WAR to a problem on MSVC 2005 -//template<int N, typename T1, typename T2> -// inline __host__ __device__ -// const typename tuple_element<N, pair<T1,T2> >::type & -// get(const pair<T1,T2> &p); - -/*! \} // pair - */ - -/*! \} // utility - */ - -} // end thrust - -#include <thrust/detail/pair.inl> - diff --git a/spaces/majinyu/recognize-detect-segment-anything/app.py b/spaces/majinyu/recognize-detect-segment-anything/app.py deleted file mode 100644 index b7314d3d404e14d49afe21c9cba769b1c354c0e1..0000000000000000000000000000000000000000 --- a/spaces/majinyu/recognize-detect-segment-anything/app.py +++ /dev/null @@ -1,158 +0,0 @@ -import torch -from ram import get_transform, inference_ram, inference_tag2text -from ram.models import ram, tag2text_caption - -ram_checkpoint = "./ram_swin_large_14m.pth" -tag2text_checkpoint = "./tag2text_swin_14m.pth" -image_size = 384 -device = "cuda" if torch.cuda.is_available() else "cpu" - - -@torch.no_grad() -def inference(raw_image, specified_tags, tagging_model_type, tagging_model, transform): - print(f"Start processing, image size {raw_image.size}") - - image = transform(raw_image).unsqueeze(0).to(device) - - if tagging_model_type == "RAM": - res = inference_ram(image, tagging_model) - tags = res[0].strip(' ').replace(' ', ' ') - tags_chinese = res[1].strip(' ').replace(' ', ' ') - print("Tags: ", tags) - print("标签: ", tags_chinese) - return tags, tags_chinese - else: - res = inference_tag2text(image, tagging_model, specified_tags) - tags = res[0].strip(' ').replace(' ', ' ') - caption = res[2] - print(f"Tags: {tags}") - print(f"Caption: {caption}") - return tags, caption - - -def inference_with_ram(img): - return inference(img, None, "RAM", ram_model, transform) - - -def inference_with_t2t(img, input_tags): - return inference(img, input_tags, "Tag2Text", tag2text_model, transform) - - -if __name__ == "__main__": - import gradio as gr - - # get transform and load models - transform = get_transform(image_size=image_size) - ram_model = ram(pretrained=ram_checkpoint, image_size=image_size, vit='swin_l').eval().to(device) - tag2text_model = tag2text_caption( - pretrained=tag2text_checkpoint, image_size=image_size, vit='swin_b').eval().to(device) - - # build GUI - def build_gui(): - - description = """ - <center><strong><font size='10'>Recognize Anything Model</font></strong></center> - <br> - <p>Welcome to the <a href='https://recognize-anything.github.io/' target='_blank'>Recognize Anything Model</a> / <a href='https://tag2text.github.io/Tag2Text' target='_blank'>Tag2Text Model</a> demo!</p> - <li> - <b>Recognize Anything Model:</b> Upload your image to get the <b>English and Chinese tags</b>! - </li> - <li> - <b>Tag2Text Model:</b> Upload your image to get the <b>tags and caption</b>! (Optional: Specify tags to get the corresponding caption.) 
- </li> - <p><b>More over:</b> Combine with <a href='https://github.com/IDEA-Research/Grounded-Segment-Anything' target='_blank'>Grounded-SAM</a>, you can get <b>boxes and masks</b>! Please run <a href='https://github.com/xinyu1205/recognize-anything/blob/main/gui_demo.ipynb' target='_blank'>this notebook</a> to try out!</p> - <p>Great thanks to <a href='https://huggingface.co/majinyu' target='_blank'>Ma Jinyu</a>, the major contributor of this demo!</p> - """ # noqa - - article = """ - <p style='text-align: center'> - RAM and Tag2Text are trained on open-source datasets, and we are persisting in refining and iterating upon it.<br/> - <a href='https://recognize-anything.github.io/' target='_blank'>Recognize Anything: A Strong Image Tagging Model</a> - | - <a href='https://https://tag2text.github.io/' target='_blank'>Tag2Text: Guiding Language-Image Model via Image Tagging</a> - </p> - """ # noqa - - with gr.Blocks(title="Recognize Anything Model") as demo: - ############### - # components - ############### - gr.HTML(description) - - with gr.Tab(label="Recognize Anything Model"): - with gr.Row(): - with gr.Column(): - ram_in_img = gr.Image(type="pil") - with gr.Row(): - ram_btn_run = gr.Button(value="Run") - try: - ram_btn_clear = gr.ClearButton() - except AttributeError: # old gradio does not have ClearButton, not big problem - ram_btn_clear = None - with gr.Column(): - ram_out_tag = gr.Textbox(label="Tags") - ram_out_biaoqian = gr.Textbox(label="标签") - gr.Examples( - examples=[ - ["images/demo1.jpg"], - ["images/demo2.jpg"], - ["images/demo4.jpg"], - ], - fn=inference_with_ram, - inputs=[ram_in_img], - outputs=[ram_out_tag, ram_out_biaoqian], - cache_examples=True - ) - - with gr.Tab(label="Tag2Text Model"): - with gr.Row(): - with gr.Column(): - t2t_in_img = gr.Image(type="pil") - t2t_in_tag = gr.Textbox(label="User Specified Tags (Optional, separated by comma)") - with gr.Row(): - t2t_btn_run = gr.Button(value="Run") - try: - t2t_btn_clear = gr.ClearButton() - except AttributeError: # old gradio does not have ClearButton, not big problem - t2t_btn_clear = None - with gr.Column(): - t2t_out_tag = gr.Textbox(label="Tags") - t2t_out_cap = gr.Textbox(label="Caption") - gr.Examples( - examples=[ - ["images/demo4.jpg", ""], - ["images/demo4.jpg", "power line"], - ["images/demo4.jpg", "track, train"], - ], - fn=inference_with_t2t, - inputs=[t2t_in_img, t2t_in_tag], - outputs=[t2t_out_tag, t2t_out_cap], - cache_examples=True - ) - - gr.HTML(article) - - ############### - # events - ############### - # run inference - ram_btn_run.click( - fn=inference_with_ram, - inputs=[ram_in_img], - outputs=[ram_out_tag, ram_out_biaoqian] - ) - t2t_btn_run.click( - fn=inference_with_t2t, - inputs=[t2t_in_img, t2t_in_tag], - outputs=[t2t_out_tag, t2t_out_cap] - ) - - # clear - if ram_btn_clear is not None: - ram_btn_clear.add([ram_in_img, ram_out_tag, ram_out_biaoqian]) - if t2t_btn_clear is not None: - t2t_btn_clear.add([t2t_in_img, t2t_in_tag, t2t_out_tag, t2t_out_cap]) - - return demo - - build_gui().launch(enable_queue=True) diff --git a/spaces/mangiucugna/difficult-conversations-bot/app.py b/spaces/mangiucugna/difficult-conversations-bot/app.py deleted file mode 100644 index 6ac9391976e9853ba34b27d64e94bd3b22837934..0000000000000000000000000000000000000000 --- a/spaces/mangiucugna/difficult-conversations-bot/app.py +++ /dev/null @@ -1,272 +0,0 @@ -import json -import os -import gradio as gr -import openai -from openai import OpenAI - -# Check if .env file exists -# If it exists, load the environment 
variables from it -# This is because the .env isn't present in production -if os.path.exists(".env"): - from dotenv import load_dotenv - - load_dotenv() - -openai.api_key = os.environ["OPENAI_API_KEY"] -client = OpenAI() -assistant = openai.beta.assistants.retrieve("asst_gKtGqKUcMOCWGJgE6kPyGVoZ") - - -def send_to_llm( - message, - character_name, - character_job_title, - character_attitude, - character_background, - character_relationship, - thread_id, -): - character = { - "fullname": character_name, - "job_title": character_job_title, - "attitude": character_attitude, - "background_story": character_background, - "relationship_with_human_talking_to": character_relationship, - } - - if thread_id is None: - thread = client.beta.threads.create() - else: - thread = client.beta.threads.retrieve(thread_id=thread_id) - - message = client.beta.threads.messages.create( - thread_id=thread.id, role="user", content=json.dumps({"message": message}) - ) - - run = client.beta.threads.runs.create( - thread_id=thread.id, - assistant_id=assistant.id, - instructions="""Here's information about your character in json format. Stick to character: -``` -%s -``` -""" - % (json.dumps(character)), - ) - while True: - run = client.beta.threads.runs.retrieve(thread_id=thread.id, run_id=run.id) - if run.status not in ("queued", "in_progress"): - break - - # status can be queued, in_progress, requires_action, cancelling, cancelled, failed, completed, or expired - output = "" - if run.status == "requires_action": - for tool_call in run.required_action.submit_tool_outputs.tool_calls: - id = tool_call.id - function_name = tool_call.function.name - if function_name == "format_response": - output = json.loads(tool_call.function.arguments) - break - - # Need to close this loop before moving on - run = client.beta.threads.runs.submit_tool_outputs( - thread_id=thread.id, - run_id=run.id, - tool_outputs=[ - { - "tool_call_id": id, - "output": "response accepted", - } - ], - ) - while True: - run = client.beta.threads.runs.retrieve(thread_id=thread.id, run_id=run.id) - if run.status not in ("queued", "in_progress"): - break - elif run.status == "completed": - messages = client.beta.threads.messages.list(thread_id=thread.id, limit=1) - for m in messages: - output = {"answer": m.content} - elif run.status == "failed": - print(f"Failed. 
Error: {run.last_error}", flush=True) - else: - print(f"Unexpected status: {run.status}", flush=True) - return output, thread.id - - -def decode_response(response): - answer = "" - stage_direction = "" - monologue = "" - - # Fill the main output from the character - if response.get("stage_direction"): - stage_direction += f"*{response['stage_direction']}*" - if response.get("answer"): - answer = response["answer"] or "**--End of conversation--**" - - # Fill the bottom panel with the internal thoughts - if response.get("explanation"): - monologue += f"🤖 What the model is thinking:\n{response['explanation']}\n\n" - if response.get("feelings"): - monologue += f"❤️ What the character is feeling:\n{response['feelings']}\n\n" - if response.get("internal_monologue"): - monologue += ( - f"💭 What the character is thinking:\n{response['internal_monologue']}\n\n" - ) - - return answer, stage_direction, monologue, "" - - -def respond( - message, - chatbox, - character_name, - character_job_title, - character_attitude, - character_background, - character_relationship, - thread, -): - if message == "": - return message, chatbox, "" - - thread_id = None - try: - response, thread_id = send_to_llm( - message, - character_name, - character_job_title, - character_attitude, - character_background, - character_relationship, - thread, - ) - except Exception as error: - e = str(error) - print(e) - response = { - "answer": "*There was an unrecoverable error while calling OpenAI. Try again*" - } - - # The response should be JSON, emphasis on should - (answer, stage_direction, monologue, error) = decode_response(response) - - # I am appending both messages at the end because the user message would be obscured anyways even if we added it on top of this function - chatbox.append((message, f"{stage_direction}\n\n{answer}")) - return "", chatbox, monologue, thread_id - - -def clear_history(inputs, thread): - gr.ClearButton(components=inputs, value="🗑️ Clear History") - thread.value = None - - -def feedback(data: gr.LikeData): - if data.liked: - print(f"User liked this response:\n{data.value}") - else: - print(f"User didn't like this response:\n{data.value}") - - -with gr.Blocks() as demo: - gr.HTML( - "<h1>Difficult Conversations Simulator</h1><p>If you want to give your feedback on the responses you received, like or dislike the messages using the buttons provided. 
Thanks!</p>" - ) - - with gr.Row(): - thread = gr.State(value=None) - with gr.Column(scale=2): - chatbox = gr.Chatbot(bubble_full_width=False) - msg = gr.Textbox(label="Type here") - submit_button = gr.Button("Send") - internal_monologue = gr.Textbox(label="Internal Monologue") - clear = clear_history([msg, chatbox, internal_monologue], thread) - - with gr.Column(scale=1): - gr.HTML("<h3>Personalize the character you want to talk to</h3>") - character_name = gr.Textbox( - label="Name", value="Juliette Mao", interactive=True - ) - character_job_title = gr.Textbox( - label="Job Title", value="Shifts Manager", interactive=True - ) - character_attitude = gr.Textbox( - label="Attitude", - value="Amicable but very defensive when it receives feedback, never takes responsibility for her actions, blames other people and gets aggressive when someone points out a mistake, deeply insecure", - interactive=True, - ) - character_background = gr.Textbox( - label="What happened before this conversation", - value="Yesterday she messed up the schedule of the restaurant and we had to rush to call people to cover shifts", - interactive=True, - ) - character_relationship = gr.Textbox( - label="You are..", value="I am their manager", interactive=True - ) - - gr.Examples( - label="Example personas", - examples=[ - [ - "Juliette Mao", - "Shifts Manager", - "Amicable but very defensive when it receives feedback, never takes responsibility for her actions, blames other people and gets aggressive when someone points out a mistake, deeply insecure", - "Yesterday she messed up the schedule of the restaurant and we had to rush to call people to cover shifts", - "I am their manager", - ], - [ - "John McLane", - "Head of Corporate Security", - "A mixture of determination, assertiveness, and a strong sense of duty. John is known for his gritty and resourceful nature when faced with challenges. He's not one to back down easily, and he often displays a willingness to take matters into his own hands to protect those around him.", - "He has yelled at me during a meeting and I want to confront him", - "I am their direct report", - ], - [ - "Rick Sanchez", - "Data Scientist", - "Rick brings a unique perspective to the workplace. He might approach challenges with a high degree of creativity and might not be afraid to question traditional methods. However, his lack of regard for authority and disregard for rules sometimes leads to conflicts within the team. His tendency to prioritize his own interests over others is a source of friction.", - "A week ago he decided to push a new model in production without running all the battery of tests necessary for it. 
It turns out that the model is doing very poorly.", - "I am a peer", - ], - ], - inputs=[ - character_name, - character_job_title, - character_attitude, - character_background, - character_relationship, - ], - ) - msg.submit( - respond, - [ - msg, - chatbox, - character_name, - character_job_title, - character_attitude, - character_background, - character_relationship, - thread, - ], - [msg, chatbox, internal_monologue, thread], - ) - submit_button.click( - respond, - [ - msg, - chatbox, - character_name, - character_job_title, - character_attitude, - character_background, - character_relationship, - thread, - ], - [msg, chatbox, internal_monologue, thread], - ) - chatbox.like(feedback, None, None) - -if __name__ == "__main__": - demo.launch() diff --git a/spaces/matthoffner/chatbot-mini/next.config.js b/spaces/matthoffner/chatbot-mini/next.config.js deleted file mode 100644 index f89b5cc4ce37ad426bc95d59b41feafce1e5d524..0000000000000000000000000000000000000000 --- a/spaces/matthoffner/chatbot-mini/next.config.js +++ /dev/null @@ -1,19 +0,0 @@ -const { i18n } = require('./next-i18next.config'); - -/** @type {import('next').NextConfig} */ -const nextConfig = { - i18n, - output: "standalone", - reactStrictMode: true, - - webpack(config, { isServer, dev }) { - config.experiments = { - asyncWebAssembly: true, - layers: true, - }; - - return config; - }, -}; - -module.exports = nextConfig; diff --git a/spaces/maureenmugo/projects/app.py b/spaces/maureenmugo/projects/app.py deleted file mode 100644 index e049d17b1118244197f7951d406ab930089e1498..0000000000000000000000000000000000000000 --- a/spaces/maureenmugo/projects/app.py +++ /dev/null @@ -1,28 +0,0 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: app.ipynb. - -# %% auto 0 -__all__ = ['learn', 'categories', 'image', 'label', 'examples', 'intf', 'is_cat', 'classify_image'] - -# %% app.ipynb 2 -from fastai.vision.all import * -import gradio as gr - -def is_cat(x): return x[0].isupper() - -# %% app.ipynb 4 -learn=load_learner('model.pkl') - -# %% app.ipynb 6 -categories = ('dog', 'cat') - -def classify_image(img): - pred, idx, probs = learn.predict(img) - return dict(zip(categories, map(float, probs))) - -# %% app.ipynb 8 -image=gr.inputs.Image(shape=(192,192)) -label=gr.outputs.Label() -examples= ['dog.jpeg', 'cat.jpeg'] - -intf=gr.Interface(fn=classify_image, inputs=image, outputs=label, examples=examples) -intf.launch(inline=True) diff --git a/spaces/maxmax20160403/sovits5.0/whisper/audio.py b/spaces/maxmax20160403/sovits5.0/whisper/audio.py deleted file mode 100644 index de8a1951408b8bf08639f17f01b3fa764a82d83b..0000000000000000000000000000000000000000 --- a/spaces/maxmax20160403/sovits5.0/whisper/audio.py +++ /dev/null @@ -1,124 +0,0 @@ -import os -from functools import lru_cache -from typing import Union - -import ffmpeg -import numpy as np -import torch -import torch.nn.functional as F - -from .utils import exact_div - -# hard-coded audio hyperparameters -SAMPLE_RATE = 16000 -N_FFT = 400 -N_MELS = 80 -HOP_LENGTH = 160 -CHUNK_LENGTH = 30 -N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE # 480000: number of samples in a chunk -N_FRAMES = exact_div(N_SAMPLES, HOP_LENGTH) # 3000: number of frames in a mel spectrogram input - - -def load_audio(file: str, sr: int = SAMPLE_RATE): - """ - Open an audio file and read as mono waveform, resampling as necessary - - Parameters - ---------- - file: str - The audio file to open - - sr: int - The sample rate to resample the audio if necessary - - Returns - ------- - A NumPy array containing the audio waveform, in 
float32 dtype. - """ - try: - # This launches a subprocess to decode audio while down-mixing and resampling as necessary. - # Requires the ffmpeg CLI and `ffmpeg-python` package to be installed. - out, _ = ( - ffmpeg.input(file, threads=0) - .output("-", format="s16le", acodec="pcm_s16le", ac=1, ar=sr) - .run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True) - ) - except ffmpeg.Error as e: - raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") from e - - return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0 - - -def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1): - """ - Pad or trim the audio array to N_SAMPLES, as expected by the encoder. - """ - if torch.is_tensor(array): - if array.shape[axis] > length: - array = array.index_select(dim=axis, index=torch.arange(length, device=array.device)) - - if array.shape[axis] < length: - pad_widths = [(0, 0)] * array.ndim - pad_widths[axis] = (0, length - array.shape[axis]) - array = F.pad(array, [pad for sizes in pad_widths[::-1] for pad in sizes]) - else: - if array.shape[axis] > length: - array = array.take(indices=range(length), axis=axis) - - if array.shape[axis] < length: - pad_widths = [(0, 0)] * array.ndim - pad_widths[axis] = (0, length - array.shape[axis]) - array = np.pad(array, pad_widths) - - return array - - -@lru_cache(maxsize=None) -def mel_filters(device, n_mels: int = N_MELS) -> torch.Tensor: - """ - load the mel filterbank matrix for projecting STFT into a Mel spectrogram. - Allows decoupling librosa dependency; saved using: - - np.savez_compressed( - "mel_filters.npz", - mel_80=librosa.filters.mel(sr=16000, n_fft=400, n_mels=80), - ) - """ - assert n_mels == 80, f"Unsupported n_mels: {n_mels}" - with np.load(os.path.join(os.path.dirname(__file__), "assets", "mel_filters.npz")) as f: - return torch.from_numpy(f[f"mel_{n_mels}"]).to(device) - - -def log_mel_spectrogram(audio: Union[str, np.ndarray, torch.Tensor], n_mels: int = N_MELS): - """ - Compute the log-Mel spectrogram of - - Parameters - ---------- - audio: Union[str, np.ndarray, torch.Tensor], shape = (*) - The path to audio or either a NumPy array or Tensor containing the audio waveform in 16 kHz - - n_mels: int - The number of Mel-frequency filters, only 80 is supported - - Returns - ------- - torch.Tensor, shape = (80, n_frames) - A Tensor that contains the Mel spectrogram - """ - if not torch.is_tensor(audio): - if isinstance(audio, str): - audio = load_audio(audio) - audio = torch.from_numpy(audio) - - window = torch.hann_window(N_FFT).to(audio.device) - stft = torch.stft(audio, N_FFT, HOP_LENGTH, window=window, return_complex=True) - magnitudes = stft[..., :-1].abs() ** 2 - - filters = mel_filters(audio.device, n_mels) - mel_spec = filters @ magnitudes - - log_spec = torch.clamp(mel_spec, min=1e-10).log10() - log_spec = torch.maximum(log_spec, log_spec.max() - 8.0) - log_spec = (log_spec + 4.0) / 4.0 - return log_spec diff --git a/spaces/merve/anonymization/source/measuring-fairness/init.js b/spaces/merve/anonymization/source/measuring-fairness/init.js deleted file mode 100644 index 5a8df63793d90464eb148443787eb91e2b34180b..0000000000000000000000000000000000000000 --- a/spaces/merve/anonymization/source/measuring-fairness/init.js +++ /dev/null @@ -1,200 +0,0 @@ -/* Copyright 2020 Google LLC. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - - - - -d3.select('body').selectAppend('div.tooltip.tooltip-hidden') - -nCols = 12 - -window.colors = { - well: d3.color('#669399') + '', - sick: d3.color('#EE2A2A') + '', - - // well: d3.color('green') + '', - // sick: d3.color('purple'), - - // well: d3.color('#e9a3c9') + '', - // sick: d3.color('#a1d76a'), - - // well: d3.color('#e9a3c9') + '', - // sick: d3.color('#a1d76a'), - - // well: d3.color('#e9a3c9') + '', - // sick: d3.color('#a1d76a'), - - // well: d3.color('#865327') + '', - // sick: d3.color('#012394'), - - // well: d3.color('#012394') + '', - // sick: d3.color('#FBC20F') + '', - - // well: d3.color('#012394') + '', - // sick: d3.color('#E71E24') + '', - - // well: d3.color('#A9159C') + '', - // sick: d3.color('#E71E24') + '', - - // well: d3.color('#A9159C') + '', - // sick: d3.color('#012394') + '', - - // well: d3.color('orange') + '', - // sick: d3.color('#012394') + '', - - -} - -window.colors = { - well: d3.interpolate(colors.well, '#fff')(.5), - sick: d3.interpolate(colors.sick, '#fff')(.2), -} - -window.lcolors = { - well: d3.interpolate(colors.well, '#fff')(.5), - sick: d3.interpolate(colors.sick, '#fff')(.35) -} -window.llcolors = { - well: d3.interpolate(colors.well, '#fff')(.5), - sick: d3.interpolate(colors.sick, '#fff')(1) -} -window.dcolors = { - well: d3.interpolate(colors.well, '#000')(.65), - sick: d3.interpolate(colors.sick, '#000')(.65) -} - -// window.colors = { -// well: d3.color('#BEF5FF') + '', -// sick: d3.color('#FCC5C3') + '', -// } - -// window.colors = { -// well: d3.color('#669399') + '', -// sick: d3.color('#EE2A2A') + '', -// } - -// window.lcolors = { -// well: d3.interpolate(colors.well, '#fff')(.3), -// sick: d3.interpolate(colors.sick, '#fff')(.3) -// } -// window.llcolors = { -// well: d3.interpolate(colors.well, '#fff')(.2), -// sick: d3.interpolate(colors.sick, '#fff')(.2) -// } - -// window.lcolors = { -// well: '#CFFCF6', -// sick: '#FFBD96' -// } - -// copy(logColors()) -function logColors(){ - return ` - body{ - --colors-well: ${d3.rgb(colors.well)}; - --colors-sick: ${d3.rgb(colors.sick)}; - --lcolors-well: ${d3.rgb(lcolors.well)}; - --lcolors-sick: ${d3.rgb(lcolors.sick)}; - --dcolors-well: ${d3.rgb(dcolors.well)}; - --dcolors-sick: ${d3.rgb(dcolors.sick)}; - } - ` -} - - - -window.init = function(){ - console.clear() - - graphSel = d3.select('#graph').html('').append('div') - totalWidth = graphSel.node().offsetWidth - totalWidth = 400 - - c = d3.conventions({ - sel: graphSel.st({marginTop: 40}), - margin: {top: 20}, - totalWidth, - totalHeight: totalWidth, - }) - - students = makeStudents() - sel = makeSel() - mini = makeMini() - slider = makeSlider() - slides = makeSlides() - gs = makeGS() - - function sizeGraphSel(){ - var scale = (totalWidth + 35)/(innerWidth - 10) // off by one, s is 35 - scale = d3.clamp(1, scale, 2) - - graphSel.st({ - transform: `scale(${1/scale})`, - transformOrigin: '0px 0px', - - }) - } - sizeGraphSel() - d3.select(window).on('resize', sizeGraphSel) - -} -init() - - - - - -!(function(){ - var footnums = '¹²³' - - 
d3.selectAll('.footstart').each(function(d, i){ - d3.select(this) - .at({ - href: '#footend-' + i, - }) - .text(footnums[i]) - .parent().at({id: 'footstart-' + i}) - }) - - d3.selectAll('.footend').each(function(d, i){ - d3.select(this) - .at({ - href: '#footstart-' + i, - id: 'footend-' + i, - }) - .text(footnums[i]) - }) - - - d3.selectAll('#sections wee, #graph .weepeople').attr('aria-hidden', true) - -})() - - - - - - - - - - - - - - - - - diff --git a/spaces/merve/hidden-bias/public/private-and-fair/index.html b/spaces/merve/hidden-bias/public/private-and-fair/index.html deleted file mode 100644 index e85df1babd2619b3dd0c8c989634bf5ba7f6d937..0000000000000000000000000000000000000000 --- a/spaces/merve/hidden-bias/public/private-and-fair/index.html +++ /dev/null @@ -1,199 +0,0 @@ -<!-- -@license -Copyright 2020 Google. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. ---> - -<!DOCTYPE html> - -<html> -<head> - <meta charset="utf-8"> - <meta name="viewport" content="width=device-width, initial-scale=1"> - - <link rel="apple-touch-icon" sizes="180x180" href="https://pair.withgoogle.com/images/favicon/apple-touch-icon.png"> - <link rel="icon" type="image/png" sizes="32x32" href="https://pair.withgoogle.com/images/favicon/favicon-32x32.png"> - <link rel="icon" type="image/png" sizes="16x16" href="https://pair.withgoogle.com/images/favicon/favicon-16x16.png"> - <link rel="mask-icon" href="https://pair.withgoogle.com/images/favicon/safari-pinned-tab.svg" color="#00695c"> - <link rel="shortcut icon" href="https://pair.withgoogle.com/images/favicon.ico"> - - <script> - !(function(){ - var url = window.location.href - if (url.split('#')[0].split('?')[0].slice(-1) != '/' && !url.includes('.html')) window.location = url + '/' - })() - </script> - - <title>Can a Model Be Differentially Private and Fair? - - - - - - - - - - - - - - - -

    Can a Model Be Differentially Private and Fair?

    Training models with differential privacy stops models from inadvertently leaking sensitive data, but there's an unexpected side-effect: reduced accuracy on underrepresented subgroups.

    Imagine you want to use machine learning to suggest new bands to listen to. You could do this by having lots of people list their favorite bands and using them to train a model. The trained model might be quite useful and fun, but if someone pokes and prods at the model in just the right way, they could extract the music preferences of someone whose data was used to train the model. Other kinds of models are potentially vulnerable; credit card numbers have been pulled out of language models and actual faces reconstructed from image models.


    Training with differential privacy limits how much information about any one data point can be extracted, but in some cases there’s an unexpected side-effect: reduced accuracy, with underrepresented subgroups disparately impacted.


    Recall that machine learning models are typically trained with gradient descent, a series of small steps taken to minimize an error function. To show how a model can leak its training data, we’ve trained two simple models to separate red and blue dots using two simple datasets that differ in one way: a single isolated data point in the upper left has been switched from red to blue.
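
    To make the mechanics concrete, here is a minimal, non-private sketch of one such gradient descent step for a linear classifier on 2-D points; the data, learning rate and model are illustrative stand-ins, not the code behind the demo.

```python
# A minimal, non-private gradient descent step for a linear classifier on
# 2-D points (an illustrative stand-in for the red/blue-dot demo, not its code).
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 2))                # 100 training points in 2-D
y = (X[:, 0] + X[:, 1] > 0).astype(float)    # which side of a line each point is on

w, b, lr = np.zeros(2), 0.0, 0.1             # boundary parameters and step size

p = 1.0 / (1.0 + np.exp(-(X @ w + b)))       # current predictions
grad_w = X.T @ (p - y) / len(y)              # gradient of the log loss w.r.t. w
grad_b = np.mean(p - y)                       # gradient w.r.t. the offset
w, b = w - lr * grad_w, b - lr * grad_b      # one small step toward lower error
```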


    Notice that the two models have very different boundary lines near the isolated point by the end of the training. Someone with access to the trained model might be able to infer if the point in the upper left is red or blue — if the color represented sensitive information, like someone’s voting record, that could be quite bad!


    Protecting the Privacy of Training Points


    We can prevent a single data point from drastically altering the model by adding two operations to each training step:²

    • Clipping the gradient (here, limiting how much the boundary line can move with each step) to bound the maximum impact a single data point can have on the final model.
    • Adding random noise to the gradient.

    Try increasing the random noise below. We’re now training lots of differentially private models; the more the potential models for the red and blue outlier points overlap, the more plausible deniability the person in the upper left has.


    You can also try dragging the other points around and adjusting the gradient clipping. Are points in the center or outliers more likely to modify the boundary lines? In two dimensions there’s a limited number of outliers, but in higher dimensions more points are outliers and much more information can be extracted from a trained model.


    Correctly combined, adding gradient clipping and random noise to gradient descent make it possible to train a model with differential privacy – we can guarantee that a model trained on a given dataset is essentially indistinguishable from a model trained on the same dataset with a single point changed.
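
    As a rough sketch (illustrative values and code, not the implementation behind the diagrams), those two operations turn an ordinary gradient step into a DP-SGD-style step: each example's gradient is clipped to a maximum norm, then Gaussian noise is added before the update.

```python
# Illustrative DP-SGD-style step: per-example gradient clipping plus Gaussian
# noise. The clip norm and noise multiplier are made-up values, not tuned ones.
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 2))
y = (X[:, 0] - X[:, 1] > 0).astype(float)
w, lr = np.zeros(2), 0.1
clip_norm, noise_multiplier = 1.0, 1.1

p = 1.0 / (1.0 + np.exp(-(X @ w)))            # predictions of a linear model
per_example_grads = (p - y)[:, None] * X      # one log-loss gradient per point

# 1. Clip each example's gradient so no single point can move the model much.
norms = np.linalg.norm(per_example_grads, axis=1, keepdims=True)
clipped = per_example_grads / np.maximum(1.0, norms / clip_norm)

# 2. Add noise so the remaining contribution of any single point is hidden.
noise = rng.normal(0.0, noise_multiplier * clip_norm, size=w.shape)
w -= lr * (clipped.sum(axis=0) + noise) / len(y)   # one differentially private step
```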


    Predictions on Outliers Change the Most


    What does this look like in practice? In Distribution Density, Tails, and Outliers in Machine Learning, a series of increasingly differentially private models were trained on MNIST digits. Every digit in the training set was ranked according to the highest level of privacy that correctly classified it.


    On the lower left, you can see digits labeled as “3” in the training data that look more like a “2” and a “9”. They’re very different from the other “3”s in the training data so adding just a bit of privacy protection causes the model to no longer classify them as “3”. Under some specific circumstances, differential privacy can actually improve how well the model generalizes to data it wasn’t trained on by limiting the influence of spurious examples.


    The right side shows more canonical digits which are classified correctly even with high levels of privacy because they’re quite similar to other digits in the training data.


    The Accuracy Tradeoff


    Limiting how much a model can learn from a single example does have a downside: it can also decrease the model’s accuracy. With 7,500 training points, 90% accuracy on MNIST digits is only achievable with an extremely low level of privacy protection; increasing privacy quickly lowers the model’s accuracy.


    Collecting more training data offers a way out of this accuracy/privacy tradeoff. With 60,000 training points, 90% accuracy can be reached with a higher privacy level than almost all real-world deployments of differential privacy.


    Looking at the differences between predictions by digit class shows another potential complication: some classes are harder to identify than others. Detecting an “8” with high confidence requires more training data and/or lower privacy than detecting a “0” with high confidence.


    This problem is exacerbated if the training data has fewer examples of one class than the others. Trying to predict an uncommon event with a differentially private model can require an enormous amount of data.


    Implications for Fairness


    Outliers also aren’t evenly distributed within a class. Below, MNIST digits are colored by their sensitivity to higher privacy levels and projected with UMAP, forming several clusters of privacy-sensitive yellow digits. It’s possible to inadvertently train a model with good overall accuracy on a class but very low accuracy on a smaller group within the class.


    There’s nothing that makes a “1” slanted to the left intrinsically harder to classify, but because there are only a few slanted “1”s in the training data it’s difficult to make a model that classifies them accurately without leaking information.


    This disparate impact doesn’t just happen in datasets of differently drawn digits: increased levels of differential privacy in a range of image and language models disproportionately decreased accuracy on underrepresented subgroups. And adding differential privacy to a medical model reduced the influence of Black patients’ data on the model while increasing the influence of white patients’ data.


    Lowering the privacy level might not help non-majoritarian data points either – they’re the ones most susceptible to having their information exposed. Again, escaping the accuracy/privacy tradeoff requires collecting more data – this time from underrepresented subgroups.


    More Reading


    There are deep connections between generalization, memorization and privacy that are still not well understood. Slightly changing the privacy constraints, for example, can create new options. If public, unlabeled data exists, a “Private Aggregation of Teacher Ensembles“ could be used instead of gradient clipping and random noise to train a differentially private model with a smaller disparate impact on accuracy.
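
    A sketch of that aggregation idea (hypothetical numbers, not code from the linked work): many “teacher” models trained on disjoint slices of the private data vote on labels for public examples, and noise is added to the vote counts before the winning label is used to train a student model.

```python
# Illustrative noisy-vote aggregation in the spirit of PATE; the vote counts
# and noise scale are arbitrary stand-ins, not values from the referenced papers.
import numpy as np

rng = np.random.default_rng(0)
num_classes = 10
teacher_votes = rng.integers(0, num_classes, size=250)   # stand-in teacher predictions

counts = np.bincount(teacher_votes, minlength=num_classes)
noisy_counts = counts + rng.laplace(scale=20.0, size=num_classes)
student_label = int(np.argmax(noisy_counts))   # label released to train the student
```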


    Finding ways to increase privacy with a smaller impact on accuracy is an active area of research – model architectures designed with privacy in mind and better dataset cleaning look like promising avenues.


    There are also additional accuracy/privacy/fairness tradeoffs beyond what’s discussed in this post. Even if a differentially private model doesn’t have large accuracy gaps between subgroups, enforcing fairness metrics can reduce privacy or accuracy.


    This post focuses on protecting the privacy of individual data points. In practice more work might be necessary to ensure that the privacy of users – who could contribute much more than a single data point each – is also protected.


    These questions are also significant outside of machine learning. Allocating resources based on a differentially private dataset – with no machine learning model involved – can also disproportionately affect different groups. The 2020 Census is the first to use differential privacy and this could have a wide range of impacts, including how congressional districts are drawn.


    Credits


    Adam Pearce // January 2022


    Thanks to Abhradeep Thakurta, Andreas Terzis, Andy Coenen, Asma Ghandeharioun, Brendan McMahan, Ellen Jiang, Emily Reif, Fernanda Viégas, James Wexler, Kevin Robinson, Matthew Jagielski, Martin Wattenberg, Meredith Morris, Miguel Guevara, Nicolas Papernot and Nithum Thain for their help with this piece.


    Footnotes


    To speed up training at the cost of looser privacy bounds, gradients, clipping and noise can be calculated on a group of data points instead of individual data points.


    The “ε” in ε-differential privacy essentially measures the overlap in two distributions after changing a single data point.
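
    Written out (the standard definition, paraphrased rather than taken from this piece), a randomized training mechanism M is (ε, δ)-differentially private if, for every pair of datasets D and D′ that differ in a single data point and every set of possible trained models S,

    \Pr[\,M(D) \in S\,] \;\le\; e^{\varepsilon}\,\Pr[\,M(D') \in S\,] + \delta

    Smaller ε and δ force the two distributions of trained models to overlap more, which is exactly the overlap described above.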


    Clipping and noising are also used outside of differential privacy as regularization techniques to improve accuracy.

    In addition to accidentally mislabeled examples, differential privacy can also provide some protection against data poisoning attacks.


    While visually similar digits aren’t necessarily interpreted in similar ways by the model, the clustering of visually similar digits in the UMAP diagram at the bottom of the page (which projects embeddings from the penultimate layer of the digit classifier) suggests there is a close connection here.


    Rebalancing the dataset without collecting more data doesn’t avoid this privacy/accuracy tradeoff – upsampling the smaller class reduces privacy and downsampling the larger class reduces data and lowers accuracy.


    See the appendix on Subgroup Size and Accuracy for more detail.


    Appendix: Subgroup Size and Accuracy


    How, exactly, does the amount of training data, the privacy level and the percentage of data from a subgroup impact accuracy? Using MNIST digits rotated 90° as a stand-in for a smaller subgroup, we can see how the accuracy of a series of simple models that classify “1”s and “7”s change based on these attributes.


    On the far left, models without any rotated digits in the training data never classify those digits more accurately than random guessing. By rotating 5% of the training digits, a small slice of models with lots of training data and low privacy can accurately classify rotated digits.


    Increasing the proportion of rotated digits to 10% or 20% or even more makes it possible to train a higher privacy model that performs well on both types of digits with the same amount of training data.


    Click on one of the models above and you can see how the accuracy gap shifts as number of training points, privacy level and percentage of rotated digits are independently changed.


    Intuitively, adding more training data has diminishing marginal increases to accuracy. Accuracy on the smaller group of rotated digits, which may just be on the cusp of being learned, falls off faster as the effective amount of training data is decreased — a disparate reduction in accuracy.


    More Explorables


    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/spaces/merve/uncertainty-calibration/public/measuring-fairness/students.js b/spaces/merve/uncertainty-calibration/public/measuring-fairness/students.js deleted file mode 100644 index 4af55cba8cc763d96aa478be96a785048d9edc42..0000000000000000000000000000000000000000 --- a/spaces/merve/uncertainty-calibration/public/measuring-fairness/students.js +++ /dev/null @@ -1,90 +0,0 @@ -/* Copyright 2020 Google LLC. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - - - -window.makeStudents = function(){ - var seed = new Math.seedrandom('he4a15') - var rand = d3.randomUniform.source(seed)(0, 1) - var letters = 'abcdefgijlmnopqrsuvwxyz' - letters = (letters + letters.toUpperCase()).split('') - - var nSickCols = 6 - var mSickCols = 8 - var fSickCols = nSickCols*2 - mSickCols - - var students = d3.range(nCols*nCols).map(i => { - var letter = letters[~~d3.randomUniform.source(seed)(0, letters.length)()] - - var isMale = i % 2 == 0 - var isSick = i < (isMale ? mSickCols : fSickCols)*nCols - var grade = isSick*.5 + rand() - var pos = {} - - return {letter, isSick, isMale, grade, pos} - }) - - students = _.sortBy(students, d => -d.grade) - d3.nestBy(students, d => d.isSick).forEach(group => { - var isSick = group[0].isSick - - var sickCols = nSickCols - var cols = isSick ? sickCols : nCols - sickCols - var xOffset = isSick ? 0 : sickCols - - group.forEach((d, i) => { - d.pos.allIJ = [cols - 1 - (i % cols) + xOffset, ~~(i/cols)] - var spreadIJ = d.pos.allIJ.slice() - if (!d.isSick) spreadIJ[0] += .1 - d.pos.all = spreadIJ.map(d => d*c.width/10) - }) - }) - - d3.nestBy(students, d => d.isSick + '-' + d.isMale).forEach(group => { - var isSick = group[0].isSick - var isMale = group[0].isMale - - var sickCols = isMale ? mSickCols : fSickCols - var cols = isSick ? sickCols : nCols - sickCols - var xOffset = isSick ? 0 : sickCols - var yOffset = isMale ? 
nCols/2 + 2 : 0 - - group.forEach((d, i) => { - d.pos.sexIJ = [cols - 1 - (i % cols) + xOffset, ~~(i/cols) + yOffset] - d.pos.sexGroupIJ = [cols - 1 - (i % cols) + xOffset, ~~(i/cols)] - var spreadIJ = d.pos.sexIJ.slice() - if (!d.isSick) spreadIJ[0] += .1 - d.pos.sex = spreadIJ.map(d => d*c.width/10) - }) - }) - - students.maleOffsetJ = nCols/2 + 2 - students.maleOffsetPx= students.maleOffsetJ*c.width/10 - - students.fSickCols = fSickCols - students.mSickCols = mSickCols - - students.colWidth = c.width/10 - - students.rand = rand - return students -} - - - - - - -if (window.init) window.init() diff --git a/spaces/mfrashad/CharacterGAN/models/stylegan2/stylegan2-pytorch/lpips/__init__.py b/spaces/mfrashad/CharacterGAN/models/stylegan2/stylegan2-pytorch/lpips/__init__.py deleted file mode 100644 index a4f86b7ee229b333a64f16d0091e988492f99c58..0000000000000000000000000000000000000000 --- a/spaces/mfrashad/CharacterGAN/models/stylegan2/stylegan2-pytorch/lpips/__init__.py +++ /dev/null @@ -1,160 +0,0 @@ - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -from skimage.measure import compare_ssim -import torch -from torch.autograd import Variable - -from lpips import dist_model - -class PerceptualLoss(torch.nn.Module): - def __init__(self, model='net-lin', net='alex', colorspace='rgb', spatial=False, use_gpu=True, gpu_ids=[0]): # VGG using our perceptually-learned weights (LPIPS metric) - # def __init__(self, model='net', net='vgg', use_gpu=True): # "default" way of using VGG as a perceptual loss - super(PerceptualLoss, self).__init__() - print('Setting up Perceptual loss...') - self.use_gpu = use_gpu - self.spatial = spatial - self.gpu_ids = gpu_ids - self.model = dist_model.DistModel() - self.model.initialize(model=model, net=net, use_gpu=use_gpu, colorspace=colorspace, spatial=self.spatial, gpu_ids=gpu_ids) - print('...[%s] initialized'%self.model.name()) - print('...Done') - - def forward(self, pred, target, normalize=False): - """ - Pred and target are Variables. - If normalize is True, assumes the images are between [0,1] and then scales them between [-1,+1] - If normalize is False, assumes the images are already between [-1,+1] - - Inputs pred and target are Nx3xHxW - Output pytorch Variable N long - """ - - if normalize: - target = 2 * target - 1 - pred = 2 * pred - 1 - - return self.model.forward(target, pred) - -def normalize_tensor(in_feat,eps=1e-10): - norm_factor = torch.sqrt(torch.sum(in_feat**2,dim=1,keepdim=True)) - return in_feat/(norm_factor+eps) - -def l2(p0, p1, range=255.): - return .5*np.mean((p0 / range - p1 / range)**2) - -def psnr(p0, p1, peak=255.): - return 10*np.log10(peak**2/np.mean((1.*p0-1.*p1)**2)) - -def dssim(p0, p1, range=255.): - return (1 - compare_ssim(p0, p1, data_range=range, multichannel=True)) / 2. 
- -def rgb2lab(in_img,mean_cent=False): - from skimage import color - img_lab = color.rgb2lab(in_img) - if(mean_cent): - img_lab[:,:,0] = img_lab[:,:,0]-50 - return img_lab - -def tensor2np(tensor_obj): - # change dimension of a tensor object into a numpy array - return tensor_obj[0].cpu().float().numpy().transpose((1,2,0)) - -def np2tensor(np_obj): - # change dimenion of np array into tensor array - return torch.Tensor(np_obj[:, :, :, np.newaxis].transpose((3, 2, 0, 1))) - -def tensor2tensorlab(image_tensor,to_norm=True,mc_only=False): - # image tensor to lab tensor - from skimage import color - - img = tensor2im(image_tensor) - img_lab = color.rgb2lab(img) - if(mc_only): - img_lab[:,:,0] = img_lab[:,:,0]-50 - if(to_norm and not mc_only): - img_lab[:,:,0] = img_lab[:,:,0]-50 - img_lab = img_lab/100. - - return np2tensor(img_lab) - -def tensorlab2tensor(lab_tensor,return_inbnd=False): - from skimage import color - import warnings - warnings.filterwarnings("ignore") - - lab = tensor2np(lab_tensor)*100. - lab[:,:,0] = lab[:,:,0]+50 - - rgb_back = 255.*np.clip(color.lab2rgb(lab.astype('float')),0,1) - if(return_inbnd): - # convert back to lab, see if we match - lab_back = color.rgb2lab(rgb_back.astype('uint8')) - mask = 1.*np.isclose(lab_back,lab,atol=2.) - mask = np2tensor(np.prod(mask,axis=2)[:,:,np.newaxis]) - return (im2tensor(rgb_back),mask) - else: - return im2tensor(rgb_back) - -def rgb2lab(input): - from skimage import color - return color.rgb2lab(input / 255.) - -def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255./2.): - image_numpy = image_tensor[0].cpu().float().numpy() - image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + cent) * factor - return image_numpy.astype(imtype) - -def im2tensor(image, imtype=np.uint8, cent=1., factor=255./2.): - return torch.Tensor((image / factor - cent) - [:, :, :, np.newaxis].transpose((3, 2, 0, 1))) - -def tensor2vec(vector_tensor): - return vector_tensor.data.cpu().numpy()[:, :, 0, 0] - -def voc_ap(rec, prec, use_07_metric=False): - """ ap = voc_ap(rec, prec, [use_07_metric]) - Compute VOC AP given precision and recall. - If use_07_metric is true, uses the - VOC 07 11 point method (default:False). - """ - if use_07_metric: - # 11 point metric - ap = 0. - for t in np.arange(0., 1.1, 0.1): - if np.sum(rec >= t) == 0: - p = 0 - else: - p = np.max(prec[rec >= t]) - ap = ap + p / 11. 
- else: - # correct AP calculation - # first append sentinel values at the end - mrec = np.concatenate(([0.], rec, [1.])) - mpre = np.concatenate(([0.], prec, [0.])) - - # compute the precision envelope - for i in range(mpre.size - 1, 0, -1): - mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i]) - - # to calculate area under PR curve, look for points - # where X axis (recall) changes value - i = np.where(mrec[1:] != mrec[:-1])[0] - - # and sum (\Delta recall) * prec - ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) - return ap - -def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255./2.): -# def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=1.): - image_numpy = image_tensor[0].cpu().float().numpy() - image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + cent) * factor - return image_numpy.astype(imtype) - -def im2tensor(image, imtype=np.uint8, cent=1., factor=255./2.): -# def im2tensor(image, imtype=np.uint8, cent=1., factor=1.): - return torch.Tensor((image / factor - cent) - [:, :, :, np.newaxis].transpose((3, 2, 0, 1))) diff --git a/spaces/mfrashad/CharacterGAN/models/stylegan2/stylegan2-pytorch/op/upfirdn2d.cpp b/spaces/mfrashad/CharacterGAN/models/stylegan2/stylegan2-pytorch/op/upfirdn2d.cpp deleted file mode 100644 index d2e633dc896433c205e18bc3e455539192ff968e..0000000000000000000000000000000000000000 --- a/spaces/mfrashad/CharacterGAN/models/stylegan2/stylegan2-pytorch/op/upfirdn2d.cpp +++ /dev/null @@ -1,23 +0,0 @@ -#include - - -torch::Tensor upfirdn2d_op(const torch::Tensor& input, const torch::Tensor& kernel, - int up_x, int up_y, int down_x, int down_y, - int pad_x0, int pad_x1, int pad_y0, int pad_y1); - -#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor") -#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") -#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) - -torch::Tensor upfirdn2d(const torch::Tensor& input, const torch::Tensor& kernel, - int up_x, int up_y, int down_x, int down_y, - int pad_x0, int pad_x1, int pad_y0, int pad_y1) { - CHECK_CUDA(input); - CHECK_CUDA(kernel); - - return upfirdn2d_op(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1); -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { - m.def("upfirdn2d", &upfirdn2d, "upfirdn2d (CUDA)"); -} \ No newline at end of file diff --git a/spaces/mingyuan/ReMoDiffuse/mogen/core/evaluation/evaluators/precision_evaluator.py b/spaces/mingyuan/ReMoDiffuse/mogen/core/evaluation/evaluators/precision_evaluator.py deleted file mode 100644 index 09063a7ad778e559216a3746564007ee30a6516d..0000000000000000000000000000000000000000 --- a/spaces/mingyuan/ReMoDiffuse/mogen/core/evaluation/evaluators/precision_evaluator.py +++ /dev/null @@ -1,74 +0,0 @@ -import numpy as np -import torch - -from ..get_model import get_motion_model, get_text_model -from .base_evaluator import BaseEvaluator -from ..utils import calculate_top_k, euclidean_distance_matrix - - -class PrecisionEvaluator(BaseEvaluator): - - def __init__(self, - data_len=0, - text_encoder_name=None, - text_encoder_path=None, - motion_encoder_name=None, - motion_encoder_path=None, - top_k=3, - batch_size=32, - drop_last=False, - replication_times=1, - replication_reduction='statistics', - **kwargs): - super().__init__( - replication_times=replication_times, - replication_reduction=replication_reduction, - batch_size=batch_size, - drop_last=drop_last, - eval_begin_idx=0, - eval_end_idx=data_len - ) - self.append_indexes = None - self.text_encoder = 
get_text_model(text_encoder_name, text_encoder_path) - self.motion_encoder = get_motion_model(motion_encoder_name, motion_encoder_path) - self.top_k = top_k - self.model_list = [self.text_encoder, self.motion_encoder] - - def single_evaluate(self, results): - results = self.prepare_results(results) - device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - motion = results['motion'] - pred_motion = results['pred_motion'] - pred_motion_length = results['pred_motion_length'] - pred_motion_mask = results['pred_motion_mask'] - text = results['text'] - token = results['token'] - self.text_encoder.to(device) - self.motion_encoder.to(device) - self.text_encoder.eval() - self.motion_encoder.eval() - with torch.no_grad(): - word_emb = self.text_encode(text, token, device=device).cpu().detach().numpy() - motion_emb = self.motion_encode(pred_motion, pred_motion_length, pred_motion_mask, device).cpu().detach().numpy() - dist_mat = euclidean_distance_matrix(word_emb, motion_emb) - argsmax = np.argsort(dist_mat, axis=1) - top_k_mat = calculate_top_k(argsmax, top_k=self.top_k) - top_k_count = top_k_mat.sum(axis=0) - all_size = word_emb.shape[0] - return top_k_count, all_size - - def concat_batch_metrics(self, batch_metrics): - top_k_count = 0 - all_size = 0 - for batch_top_k_count, batch_all_size in batch_metrics: - top_k_count += batch_top_k_count - all_size += batch_all_size - R_precision = top_k_count / all_size - return R_precision - - def parse_values(self, values): - metrics = {} - for top_k in range(self.top_k): - metrics['R_precision Top %d (mean)' % (top_k + 1)] = values[0][top_k] - metrics['R_precision Top %d (conf)' % (top_k + 1)] = values[1][top_k] - return metrics diff --git a/spaces/mipbkhn/BreastCancer/gradio_article.md b/spaces/mipbkhn/BreastCancer/gradio_article.md deleted file mode 100644 index 8823383845d6448083ed5fd99bf709e0be2ec55b..0000000000000000000000000000000000000000 --- a/spaces/mipbkhn/BreastCancer/gradio_article.md +++ /dev/null @@ -1,11 +0,0 @@ - -## Description -In this application, I delved into the realm of medical imaging to combat one of the most prevalent and devastating diseases worldwide – breast cancer. Inspired by the imperative need for early detection and the potential to revolutionize patient care, I participated in the prestigious Kaggle competition, set forth by the Radiological Society of North America (RSNA), with the audacious goal of improving the identification of breast cancer through automated screening mammography. - -Breast cancer remains a formidable global health challenge, with staggering statistics revealing its widespread impact. To address this pressing issue, I harnessed the power of advanced machine learning techniques to revolutionize the process of breast cancer detection. By leveraging a diverse dataset of screening mammograms, acquired from regular screenings, this project aimed to develop a robust and accurate AI model capable of identifying potential malignancies with a level of precision that could rival experienced radiologists. - -The significance of this endeavor extends beyond statistical improvement; it holds the potential to transform the landscape of breast cancer diagnosis. Through this project, I sought to empower healthcare professionals with a tool that not only enhances their diagnostic accuracy but also optimizes their workflow, thereby amplifying the quality and safety of patient care. 
The ripple effects of this advancement are far-reaching – from reducing the burden of unnecessary medical procedures and mitigating costs, to extending the benefits of early detection to underserved populations around the world. - -The challenges inherent in the early detection of breast cancer, ranging from the scarcity of trained radiologists to the prevalence of false positive results, underscored the urgency of my pursuit. Through rigorous experimentation, iterative model refinement, and insightful feature engineering, I endeavored to create an AI solution that holds the potential to reshape the future of breast cancer screening. - -As breast cancer continues to impact countless lives globally, the significance of such AI application cannot be overstated. It represents a profound step forward in merging cutting-edge technology with medical expertise, with the shared goal of reducing breast cancer mortality rates. With this project, I aspire to leave an indelible mark on the field of medical AI, contributing to a brighter future where early detection becomes not only a possibility but a reality for all, regardless of geographic or socioeconomic constraints. \ No newline at end of file diff --git a/spaces/mjdolan/Holiday-StyleGAN-NADA/e4e/utils/__init__.py b/spaces/mjdolan/Holiday-StyleGAN-NADA/e4e/utils/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/ml-energy/leaderboard/data/README.md b/spaces/ml-energy/leaderboard/data/README.md deleted file mode 100644 index 1e96d3d672036878a9491af22b61edc7d9ac46ae..0000000000000000000000000000000000000000 --- a/spaces/ml-energy/leaderboard/data/README.md +++ /dev/null @@ -1,35 +0,0 @@ -# Data files for the ML.ENERGY Leaderboard - -This directory holds all the data for the leaderboard table. - -Code that reads in the data here can be found in the constructor of `TableManager` in `app.py`. - -## Parameters - -There are two types of parameters: (1) Those that become radio buttons on the leaderboard and (2) those that become columns on the leaderboard table. -Models are always placed in rows. - -Currently, there are only two parameters that become radio buttons: GPU model (e.g., V100, A40, A100) and task (e.g., chat, chat-concise, instruct, and instruct-concise). -This is defined in the `schema.yaml` file. - -Radio button parameters have their own CSV file in this directory. -For instance, benchmark results for the *chat* task ran on an *A100* GPU lives in `A100_chat_benchmark.csv`. This file name is dynamically constructed by the leaderboard Gradio application by looking at `schema.yaml` and read in as a Pandas DataFrame. - -Parameters that become columns in the table are put directly in the benchmark CSV files, e.g., `batch_size` and `datatype`. - -## Adding new models - -1. Add your model to `models.json`. - - The model's JSON key should be its unique codename, e.g. Hugging Face Hub model name. It's usually not that readable. - - `url` should point to a page where people can obtain the model's weights, e.g. Hugging Face Hub. - - `nickname` should be a short human-readable string that identifies the model. - - `params` should be an integer rounded to billions. - -1. Add NLP dataset evaluation scores to `score.csv`. - - `model` is the model's JSON key in `models.json`. - - `arc` is the accuracy on the [ARC challenge](https://allenai.org/data/arc) dataset. - - `hellaswag` is the accuracy on the [HellaSwag](https://allenai.org/data/hellaswag) dataset. 
- - `truthfulqa` is the accuracy on the [TruthfulQA](https://github.com/sylinrl/TruthfulQA) MC2 dataset. - - We obtain these metrics using lm-evaluation-harness. See [here](https://github.com/ml-energy/leaderboard/tree/master/pegasus#nlp-benchmark) for specific instructions. - -1. Add benchmarking results in CSV files, e.g. `A100_chat_benchmark.csv`. It should be evident from the name of the CSV files which setting the file corresponds to. diff --git a/spaces/ml6team/Speaker-Diarization/diarizers/diarizer.py b/spaces/ml6team/Speaker-Diarization/diarizers/diarizer.py deleted file mode 100644 index 2d163d67d4b5bce7c518307739664ff757513178..0000000000000000000000000000000000000000 --- a/spaces/ml6team/Speaker-Diarization/diarizers/diarizer.py +++ /dev/null @@ -1,21 +0,0 @@ -""" -Abstract class for diarization -""" - -import matplotlib.pyplot as plt - -from abc import ABC, abstractmethod - - -class Diarizer(ABC): - """ - Diarizer base class - """ - - @abstractmethod - def get_diarization_figure(self) -> plt.gcf: - """ - Function that returns the audio plot with diarization segmentations - Returns: - plt.gcf: the diarization plot - """ diff --git a/spaces/monisazeem/ChatGPT4/README.md b/spaces/monisazeem/ChatGPT4/README.md deleted file mode 100644 index 7938de14e5355209aaae713f289ca469181bbb17..0000000000000000000000000000000000000000 --- a/spaces/monisazeem/ChatGPT4/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Chat-with-GPT4 -emoji: 🚀 -colorFrom: red -colorTo: indigo -sdk: gradio -sdk_version: 3.21.0 -app_file: app.py -pinned: false -license: mit -duplicated_from: ysharma/ChatGPT4 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/mrneuralnet/P-DFD/dataset/wild_deepfake.py b/spaces/mrneuralnet/P-DFD/dataset/wild_deepfake.py deleted file mode 100644 index 5f39c1a81f04fadae39dd46d0aae9cefc023b6ab..0000000000000000000000000000000000000000 --- a/spaces/mrneuralnet/P-DFD/dataset/wild_deepfake.py +++ /dev/null @@ -1,100 +0,0 @@ -import torch -import numpy as np -from os.path import join -from dataset import AbstractDataset - -SPLITS = ["train", "test"] - - -class WildDeepfake(AbstractDataset): - """ - Wild Deepfake Dataset proposed in "WildDeepfake: A Challenging Real-World Dataset for Deepfake Detection" - """ - - def __init__(self, cfg, seed=2022, transforms=None, transform=None, target_transform=None): - # pre-check - if cfg['split'] not in SPLITS: - raise ValueError(f"split should be one of {SPLITS}, but found {cfg['split']}.") - super(WildDeepfake, self).__init__(cfg, seed, transforms, transform, target_transform) - print(f"Loading data from 'WildDeepfake' of split '{cfg['split']}'" - f"\nPlease wait patiently...") - self.categories = ['original', 'fake'] - self.root = cfg['root'] - self.num_train = cfg.get('num_image_train', None) - self.num_test = cfg.get('num_image_test', None) - self.images, self.targets = self.__get_images() - print(f"Data from 'WildDeepfake' loaded.") - print(f"Dataset contains {len(self.images)} images.\n") - - def __get_images(self): - if self.split == 'train': - num = self.num_train - elif self.split == 'test': - num = self.num_test - else: - num = None - real_images = torch.load(join(self.root, self.split, "real.pickle")) - if num is not None: - real_images = np.random.choice(real_images, num // 3, replace=False) - real_tgts = [torch.tensor(0)] * len(real_images) - print(f"real: {len(real_tgts)}") - fake_images = torch.load(join(self.root, self.split, "fake.pickle")) - if num is not 
None: - fake_images = np.random.choice(fake_images, num - num // 3, replace=False) - fake_tgts = [torch.tensor(1)] * len(fake_images) - print(f"fake: {len(fake_tgts)}") - return real_images + fake_images, real_tgts + fake_tgts - - def __getitem__(self, index): - path = join(self.root, self.split, self.images[index]) - tgt = self.targets[index] - return path, tgt - - -if __name__ == '__main__': - import yaml - - config_path = "../config/dataset/wilddeepfake.yml" - with open(config_path) as config_file: - config = yaml.load(config_file, Loader=yaml.FullLoader) - config = config["train_cfg"] - # config = config["test_cfg"] - - - def run_dataset(): - dataset = WildDeepfake(config) - print(f"dataset: {len(dataset)}") - for i, _ in enumerate(dataset): - path, target = _ - print(f"path: {path}, target: {target}") - if i >= 9: - break - - - def run_dataloader(display_samples=False): - from torch.utils import data - import matplotlib.pyplot as plt - - dataset = WildDeepfake(config) - dataloader = data.DataLoader(dataset, batch_size=8, shuffle=True) - print(f"dataset: {len(dataset)}") - for i, _ in enumerate(dataloader): - path, targets = _ - image = dataloader.dataset.load_item(path) - print(f"image: {image.shape}, target: {targets}") - if display_samples: - plt.figure() - img = image[0].permute([1, 2, 0]).numpy() - plt.imshow(img) - # plt.savefig("./img_" + str(i) + ".png") - plt.show() - if i >= 9: - break - - - ########################### - # run the functions below # - ########################### - - # run_dataset() - run_dataloader(False) diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/m2m_100/tok.sh b/spaces/mshukor/UnIVAL/fairseq/examples/m2m_100/tok.sh deleted file mode 100644 index ba2ec5a2f3f4794d2e528d3a6574bf05abe1d043..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/examples/m2m_100/tok.sh +++ /dev/null @@ -1,83 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) 2019-present, Facebook, Inc. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
-# - -set -e - -TOKENIZERS_SCRIPTS=tokenizers -INSTALL_PATH=$TOKENIZERS_SCRIPTS/thirdparty - -N_THREADS=8 - -lg=$1 - -MOSES=$INSTALL_PATH/mosesdecoder -REPLACE_UNICODE_PUNCT=$MOSES/scripts/tokenizer/replace-unicode-punctuation.perl -NORM_PUNC=$MOSES/scripts/tokenizer/normalize-punctuation.perl -REM_NON_PRINT_CHAR=$MOSES/scripts/tokenizer/remove-non-printing-char.perl -TOKENIZER=$MOSES/scripts/tokenizer/tokenizer.perl - -# special tokenization for Romanian -WMT16_SCRIPTS=$INSTALL_PATH/wmt16-scripts - -NORMALIZE_ROMANIAN=$WMT16_SCRIPTS/preprocess/normalise-romanian.py -REMOVE_DIACRITICS=$WMT16_SCRIPTS/preprocess/remove-diacritics.py - -# Burmese -MY_SEGMENT=$INSTALL_PATH/seg_my.py - -# Arabic -AR_TOKENIZER=$TOKENIZERS_SCRIPTS/tokenizer_ar.sh - -# Korean -KO_SEGMENT=$TOKENIZERS_SCRIPTS/seg_ko.sh - -# Japanese -JA_SEGMENT=$TOKENIZERS_SCRIPTS/seg_ja.sh - -# Indic -IN_TOKENIZER=$TOKENIZERS_SCRIPTS/tokenize_indic.py -INDIC_RESOURCES_PATH=$INSTALL_PATH/indic_nlp_resources - -# Thai -THAI_TOKENIZER=$TOKENIZERS_SCRIPTS/tokenize_thai.py - -# Chinese -CHINESE_TOKENIZER=$TOKENIZERS_SCRIPTS/tokenize_zh.py - -# Chinese -if [ "$lg" = "zh" ]; then - cat - | $REPLACE_UNICODE_PUNCT | $NORM_PUNC -l $lg | $REM_NON_PRINT_CHAR | python $CHINESE_TOKENIZER -# Thai -elif [ "$lg" = "th" ]; then - cat - | python $THAI_TOKENIZER -# Japanese -elif [ "$lg" = "ja" ]; then - cat - | $REPLACE_UNICODE_PUNCT | $NORM_PUNC -l $lg | $REM_NON_PRINT_CHAR | ${JA_SEGMENT} -# Korean -elif [ "$lg" = "ko" ]; then - cat - | $REM_NON_PRINT_CHAR | ${KO_SEGMENT} -# Romanian -elif [ "$lg" = "ro" ]; then - cat - | $REPLACE_UNICODE_PUNCT | $NORM_PUNC -l $lg | $REM_NON_PRINT_CHAR | $NORMALIZE_ROMANIAN | $REMOVE_DIACRITICS | $TOKENIZER -no-escape -threads $N_THREADS -l $lg -# Burmese -elif [ "$lg" = "my" ]; then - cat - | python ${MY_SEGMENT} -# Arabic -elif [ "$lg" = "ar" ]; then - cat - | ${AR_TOKENIZER} -# Indic -elif [ "$lg" = "ne" ]; then - cat - | python ${IN_TOKENIZER} $lg -elif [ "$lg" = "si" ]; then - cat - | python ${IN_TOKENIZER} $lg -elif [ "$lg" = "hi" ]; then - cat - | python ${IN_TOKENIZER} $lg -# other languages -else - cat - | $REPLACE_UNICODE_PUNCT | $NORM_PUNC -l $lg | $REM_NON_PRINT_CHAR | $TOKENIZER -no-escape -threads $N_THREADS -l $lg -fi diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/multilingual/finetune_multilingual_model.sh b/spaces/mshukor/UnIVAL/fairseq/examples/multilingual/finetune_multilingual_model.sh deleted file mode 100644 index 25960c5dc8a02e5580b61837099770a082b4dd83..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/examples/multilingual/finetune_multilingual_model.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -path_2_data=$1 # which contains binarized data for each directions -lang_list=$2 # -lang_pairs=$3 #a list language pairs to train multilingual models, e.g. 
"en-fr,en-cs,fr-en,cs-en" -# pretrained can be an mBART pretrained model as well -pretrained_model=$4 # - - -fairseq-train "$path_2_data" \ - --encoder-normalize-before --decoder-normalize-before \ - --arch transformer --layernorm-embedding \ - --task translation_multi_simple_epoch \ - --finetune-from-model "$pretrained_model" \ - --sampling-method "temperature" \ - --sampling-temperature "1.5" \ - --encoder-langtok "src" \ - --decoder-langtok \ - --lang-dict "$lang_list" \ - --lang-pairs "$lang_pairs" \ - --criterion label_smoothed_cross_entropy --label-smoothing 0.2 \ - --optimizer adam --adam-eps 1e-06 --adam-betas '(0.9, 0.98)' \ - --lr-scheduler inverse_sqrt --lr 3e-05 --warmup-updates 2500 --max-update 40000 \ - --dropout 0.3 --attention-dropout 0.1 --weight-decay 0.0 \ - --max-tokens 1024 --update-freq 2 \ - --save-interval 1 --save-interval-updates 5000 --keep-interval-updates 10 --no-epoch-checkpoints \ - --seed 222 --log-format simple --log-interval 2 diff --git a/spaces/mshukor/UnIVAL/fairseq/fairseq/models/fconv.py b/spaces/mshukor/UnIVAL/fairseq/fairseq/models/fconv.py deleted file mode 100644 index c99a2151014d816ec9aff6f4b27d71224dd7b4cf..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/fairseq/models/fconv.py +++ /dev/null @@ -1,756 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import math - -import torch -import torch.nn as nn -import torch.nn.functional as F -from fairseq import utils -from fairseq.models import ( - FairseqEncoder, - FairseqEncoderDecoderModel, - FairseqIncrementalDecoder, - register_model, - register_model_architecture, -) -from fairseq.modules import ( - AdaptiveSoftmax, - BeamableMM, - FairseqDropout, - GradMultiply, - LearnedPositionalEmbedding, - LinearizedConvolution, -) - - -@register_model("fconv") -class FConvModel(FairseqEncoderDecoderModel): - """ - A fully convolutional model, i.e. a convolutional encoder and a - convolutional decoder, as described in `"Convolutional Sequence to Sequence - Learning" (Gehring et al., 2017) `_. - - Args: - encoder (FConvEncoder): the encoder - decoder (FConvDecoder): the decoder - - The Convolutional model provides the following named architectures and - command-line arguments: - - .. 
argparse:: - :ref: fairseq.models.fconv_parser - :prog: - """ - - @classmethod - def hub_models(cls): - def moses_subword(path): - return { - "path": path, - "tokenizer": "moses", - "bpe": "subword_nmt", - } - - return { - "conv.wmt14.en-fr": moses_subword( - "https://dl.fbaipublicfiles.com/fairseq/models/wmt14.v2.en-fr.fconv-py.tar.bz2" - ), - "conv.wmt14.en-de": moses_subword( - "https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-de.fconv-py.tar.bz2" - ), - "conv.wmt17.en-de": moses_subword( - "https://dl.fbaipublicfiles.com/fairseq/models/wmt17.v2.en-de.fconv-py.tar.bz2" - ), - } - - def __init__(self, encoder, decoder): - super().__init__(encoder, decoder) - self.encoder.num_attention_layers = sum( - layer is not None for layer in decoder.attention - ) - - @staticmethod - def add_args(parser): - """Add model-specific arguments to the parser.""" - # fmt: off - parser.add_argument('--dropout', type=float, metavar='D', - help='dropout probability') - parser.add_argument('--encoder-embed-dim', type=int, metavar='N', - help='encoder embedding dimension') - parser.add_argument('--encoder-embed-path', type=str, metavar='STR', - help='path to pre-trained encoder embedding') - parser.add_argument('--encoder-layers', type=str, metavar='EXPR', - help='encoder layers [(dim, kernel_size), ...]') - parser.add_argument('--decoder-embed-dim', type=int, metavar='N', - help='decoder embedding dimension') - parser.add_argument('--decoder-embed-path', type=str, metavar='STR', - help='path to pre-trained decoder embedding') - parser.add_argument('--decoder-layers', type=str, metavar='EXPR', - help='decoder layers [(dim, kernel_size), ...]') - parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N', - help='decoder output embedding dimension') - parser.add_argument('--decoder-attention', type=str, metavar='EXPR', - help='decoder attention [True, ...]') - parser.add_argument('--share-input-output-embed', action='store_true', - help='share input and output embeddings (requires' - ' --decoder-out-embed-dim and --decoder-embed-dim' - ' to be equal)') - # fmt: on - - @classmethod - def build_model(cls, args, task): - """Build a new model instance.""" - # make sure that all args are properly defaulted (in case there are any new ones) - base_architecture(args) - - encoder_embed_dict = None - if args.encoder_embed_path: - encoder_embed_dict = utils.parse_embedding(args.encoder_embed_path) - utils.print_embed_overlap(encoder_embed_dict, task.source_dictionary) - - decoder_embed_dict = None - if args.decoder_embed_path: - decoder_embed_dict = utils.parse_embedding(args.decoder_embed_path) - utils.print_embed_overlap(decoder_embed_dict, task.target_dictionary) - - encoder = FConvEncoder( - dictionary=task.source_dictionary, - embed_dim=args.encoder_embed_dim, - embed_dict=encoder_embed_dict, - convolutions=eval(args.encoder_layers), - dropout=args.dropout, - max_positions=args.max_source_positions, - ) - decoder = FConvDecoder( - dictionary=task.target_dictionary, - embed_dim=args.decoder_embed_dim, - embed_dict=decoder_embed_dict, - convolutions=eval(args.decoder_layers), - out_embed_dim=args.decoder_out_embed_dim, - attention=eval(args.decoder_attention), - dropout=args.dropout, - max_positions=args.max_target_positions, - share_embed=args.share_input_output_embed, - ) - return FConvModel(encoder, decoder) - - -class FConvEncoder(FairseqEncoder): - """ - Convolutional encoder consisting of `len(convolutions)` layers. 
- - Args: - dictionary (~fairseq.data.Dictionary): encoding dictionary - embed_dim (int, optional): embedding dimension - embed_dict (str, optional): filename from which to load pre-trained - embeddings - max_positions (int, optional): maximum supported input sequence length - convolutions (list, optional): the convolutional layer structure. Each - list item `i` corresponds to convolutional layer `i`. Layers are - given as ``(out_channels, kernel_width, [residual])``. Residual - connections are added between layers when ``residual=1`` (which is - the default behavior). - dropout (float, optional): dropout to be applied before each conv layer - """ - - def __init__( - self, - dictionary, - embed_dim=512, - embed_dict=None, - max_positions=1024, - convolutions=((512, 3),) * 20, - dropout=0.1, - ): - super().__init__(dictionary) - self.dropout_module = FairseqDropout( - dropout, module_name=self.__class__.__name__ - ) - self.num_attention_layers = None - - num_embeddings = len(dictionary) - self.padding_idx = dictionary.pad() - self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx) - if embed_dict: - self.embed_tokens = utils.load_embedding( - embed_dict, self.dictionary, self.embed_tokens - ) - - self.embed_positions = PositionalEmbedding( - max_positions, - embed_dim, - self.padding_idx, - ) - - convolutions = extend_conv_spec(convolutions) - in_channels = convolutions[0][0] - self.fc1 = Linear(embed_dim, in_channels, dropout=dropout) - self.projections = nn.ModuleList() - self.convolutions = nn.ModuleList() - self.residuals = [] - - layer_in_channels = [in_channels] - for _, (out_channels, kernel_size, residual) in enumerate(convolutions): - if residual == 0: - residual_dim = out_channels - else: - residual_dim = layer_in_channels[-residual] - self.projections.append( - Linear(residual_dim, out_channels) - if residual_dim != out_channels - else None - ) - if kernel_size % 2 == 1: - padding = kernel_size // 2 - else: - padding = 0 - self.convolutions.append( - ConvTBC( - in_channels, - out_channels * 2, - kernel_size, - dropout=dropout, - padding=padding, - ) - ) - self.residuals.append(residual) - in_channels = out_channels - layer_in_channels.append(out_channels) - self.fc2 = Linear(in_channels, embed_dim) - - def forward(self, src_tokens, src_lengths): - """ - Args: - src_tokens (LongTensor): tokens in the source language of shape - `(batch, src_len)` - src_lengths (LongTensor): lengths of each source sentence of shape - `(batch)` - - Returns: - dict: - - **encoder_out** (tuple): a tuple with two elements, where the - first element is the last encoder layer's output and the - second element is the same quantity summed with the input - embedding (used for attention). The shape of both tensors is - `(batch, src_len, embed_dim)`. 
- - **encoder_padding_mask** (ByteTensor): the positions of - padding elements of shape `(batch, src_len)` - """ - # embed tokens and positions - x = self.embed_tokens(src_tokens) + self.embed_positions(src_tokens) - x = self.dropout_module(x) - input_embedding = x - - # project to size of convolution - x = self.fc1(x) - - # used to mask padding in input - encoder_padding_mask = src_tokens.eq(self.padding_idx).t() # -> T x B - if not encoder_padding_mask.any(): - encoder_padding_mask = None - - # B x T x C -> T x B x C - x = x.transpose(0, 1) - - residuals = [x] - # temporal convolutions - for proj, conv, res_layer in zip( - self.projections, self.convolutions, self.residuals - ): - if res_layer > 0: - residual = residuals[-res_layer] - residual = residual if proj is None else proj(residual) - else: - residual = None - - if encoder_padding_mask is not None: - x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0) - - x = self.dropout_module(x) - if conv.kernel_size[0] % 2 == 1: - # padding is implicit in the conv - x = conv(x) - else: - padding_l = (conv.kernel_size[0] - 1) // 2 - padding_r = conv.kernel_size[0] // 2 - x = F.pad(x, (0, 0, 0, 0, padding_l, padding_r)) - x = conv(x) - x = F.glu(x, dim=2) - - if residual is not None: - x = (x + residual) * math.sqrt(0.5) - residuals.append(x) - - # T x B x C -> B x T x C - x = x.transpose(1, 0) - - # project back to size of embedding - x = self.fc2(x) - - if encoder_padding_mask is not None: - encoder_padding_mask = encoder_padding_mask.t() # -> B x T - x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0) - - # scale gradients (this only affects backward, not forward) - x = GradMultiply.apply(x, 1.0 / (2.0 * self.num_attention_layers)) - - # add output to input embedding for attention - y = (x + input_embedding) * math.sqrt(0.5) - - return { - "encoder_out": (x, y), - "encoder_padding_mask": encoder_padding_mask, # B x T - } - - def reorder_encoder_out(self, encoder_out, new_order): - if encoder_out["encoder_out"] is not None: - encoder_out["encoder_out"] = ( - encoder_out["encoder_out"][0].index_select(0, new_order), - encoder_out["encoder_out"][1].index_select(0, new_order), - ) - if encoder_out["encoder_padding_mask"] is not None: - encoder_out["encoder_padding_mask"] = encoder_out[ - "encoder_padding_mask" - ].index_select(0, new_order) - return encoder_out - - def max_positions(self): - """Maximum input length supported by the encoder.""" - return self.embed_positions.max_positions - - -class AttentionLayer(nn.Module): - def __init__(self, conv_channels, embed_dim, bmm=None): - super().__init__() - # projects from output of convolution to embedding dimension - self.in_projection = Linear(conv_channels, embed_dim) - # projects from embedding dimension to convolution size - self.out_projection = Linear(embed_dim, conv_channels) - - self.bmm = bmm if bmm is not None else torch.bmm - - def forward(self, x, target_embedding, encoder_out, encoder_padding_mask): - residual = x - - # attention - x = (self.in_projection(x) + target_embedding) * math.sqrt(0.5) - x = self.bmm(x, encoder_out[0]) - - # don't attend over padding - if encoder_padding_mask is not None: - x = ( - x.float() - .masked_fill(encoder_padding_mask.unsqueeze(1), float("-inf")) - .type_as(x) - ) # FP16 support: cast to float and back - - # softmax over last dim - sz = x.size() - x = F.softmax(x.view(sz[0] * sz[1], sz[2]), dim=1) - x = x.view(sz) - attn_scores = x - - x = self.bmm(x, encoder_out[1]) - - # scale attention output (respecting potentially different lengths) - s 
= encoder_out[1].size(1) - if encoder_padding_mask is None: - x = x * (s * math.sqrt(1.0 / s)) - else: - s = s - encoder_padding_mask.type_as(x).sum( - dim=1, keepdim=True - ) # exclude padding - s = s.unsqueeze(-1) - x = x * (s * s.rsqrt()) - - # project back - x = (self.out_projection(x) + residual) * math.sqrt(0.5) - return x, attn_scores - - def make_generation_fast_(self, beamable_mm_beam_size=None, **kwargs): - """Replace torch.bmm with BeamableMM.""" - if beamable_mm_beam_size is not None: - del self.bmm - self.add_module("bmm", BeamableMM(beamable_mm_beam_size)) - - -class FConvDecoder(FairseqIncrementalDecoder): - """Convolutional decoder""" - - def __init__( - self, - dictionary, - embed_dim=512, - embed_dict=None, - out_embed_dim=256, - max_positions=1024, - convolutions=((512, 3),) * 20, - attention=True, - dropout=0.1, - share_embed=False, - positional_embeddings=True, - adaptive_softmax_cutoff=None, - adaptive_softmax_dropout=0.0, - ): - super().__init__(dictionary) - self.register_buffer("version", torch.Tensor([2])) - self.dropout_module = FairseqDropout( - dropout, module_name=self.__class__.__name__ - ) - self.need_attn = True - - convolutions = extend_conv_spec(convolutions) - in_channels = convolutions[0][0] - if isinstance(attention, bool): - # expand True into [True, True, ...] and do the same with False - attention = [attention] * len(convolutions) - if not isinstance(attention, list) or len(attention) != len(convolutions): - raise ValueError( - "Attention is expected to be a list of booleans of " - "length equal to the number of layers." - ) - - num_embeddings = len(dictionary) - padding_idx = dictionary.pad() - self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx) - if embed_dict: - self.embed_tokens = utils.load_embedding( - embed_dict, self.dictionary, self.embed_tokens - ) - - self.embed_positions = ( - PositionalEmbedding( - max_positions, - embed_dim, - padding_idx, - ) - if positional_embeddings - else None - ) - - self.fc1 = Linear(embed_dim, in_channels, dropout=dropout) - self.projections = nn.ModuleList() - self.convolutions = nn.ModuleList() - self.attention = nn.ModuleList() - self.residuals = [] - - layer_in_channels = [in_channels] - for i, (out_channels, kernel_size, residual) in enumerate(convolutions): - if residual == 0: - residual_dim = out_channels - else: - residual_dim = layer_in_channels[-residual] - self.projections.append( - Linear(residual_dim, out_channels) - if residual_dim != out_channels - else None - ) - self.convolutions.append( - LinearizedConv1d( - in_channels, - out_channels * 2, - kernel_size, - padding=(kernel_size - 1), - dropout=dropout, - ) - ) - self.attention.append( - AttentionLayer(out_channels, embed_dim) if attention[i] else None - ) - self.residuals.append(residual) - in_channels = out_channels - layer_in_channels.append(out_channels) - - self.adaptive_softmax = None - self.fc2 = self.fc3 = None - - if adaptive_softmax_cutoff is not None: - assert not share_embed - self.adaptive_softmax = AdaptiveSoftmax( - num_embeddings, - in_channels, - adaptive_softmax_cutoff, - dropout=adaptive_softmax_dropout, - ) - else: - self.fc2 = Linear(in_channels, out_embed_dim) - if share_embed: - assert out_embed_dim == embed_dim, ( - "Shared embed weights implies same dimensions " - " out_embed_dim={} vs embed_dim={}".format(out_embed_dim, embed_dim) - ) - self.fc3 = nn.Linear(out_embed_dim, num_embeddings) - self.fc3.weight = self.embed_tokens.weight - else: - self.fc3 = Linear(out_embed_dim, num_embeddings, 
dropout=dropout) - - def forward( - self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused - ): - if encoder_out is not None: - encoder_padding_mask = encoder_out["encoder_padding_mask"] - encoder_out = encoder_out["encoder_out"] - - # split and transpose encoder outputs - encoder_a, encoder_b = self._split_encoder_out( - encoder_out, incremental_state - ) - - if self.embed_positions is not None: - pos_embed = self.embed_positions(prev_output_tokens, incremental_state) - else: - pos_embed = 0 - - if incremental_state is not None: - prev_output_tokens = prev_output_tokens[:, -1:] - x = self._embed_tokens(prev_output_tokens, incremental_state) - - # embed tokens and combine with positional embeddings - x += pos_embed - x = self.dropout_module(x) - target_embedding = x - - # project to size of convolution - x = self.fc1(x) - - # B x T x C -> T x B x C - x = self._transpose_if_training(x, incremental_state) - - # temporal convolutions - avg_attn_scores = None - num_attn_layers = len(self.attention) - residuals = [x] - for proj, conv, attention, res_layer in zip( - self.projections, self.convolutions, self.attention, self.residuals - ): - if res_layer > 0: - residual = residuals[-res_layer] - residual = residual if proj is None else proj(residual) - else: - residual = None - - x = self.dropout_module(x) - x = conv(x, incremental_state) - x = F.glu(x, dim=2) - - # attention - if attention is not None: - x = self._transpose_if_training(x, incremental_state) - - x, attn_scores = attention( - x, target_embedding, (encoder_a, encoder_b), encoder_padding_mask - ) - - if not self.training and self.need_attn: - attn_scores = attn_scores / num_attn_layers - if avg_attn_scores is None: - avg_attn_scores = attn_scores - else: - avg_attn_scores.add_(attn_scores) - - x = self._transpose_if_training(x, incremental_state) - - # residual - if residual is not None: - x = (x + residual) * math.sqrt(0.5) - residuals.append(x) - - # T x B x C -> B x T x C - x = self._transpose_if_training(x, incremental_state) - - # project back to size of vocabulary if not using adaptive softmax - if self.fc2 is not None and self.fc3 is not None: - x = self.fc2(x) - x = self.dropout_module(x) - x = self.fc3(x) - - return x, avg_attn_scores - - def reorder_incremental_state(self, incremental_state, new_order): - super().reorder_incremental_state(incremental_state, new_order) - encoder_out = utils.get_incremental_state( - self, incremental_state, "encoder_out" - ) - if encoder_out is not None: - encoder_out = tuple(eo.index_select(0, new_order) for eo in encoder_out) - utils.set_incremental_state( - self, incremental_state, "encoder_out", encoder_out - ) - - def max_positions(self): - """Maximum output length supported by the decoder.""" - return ( - self.embed_positions.max_positions - if self.embed_positions is not None - else float("inf") - ) - - def upgrade_state_dict(self, state_dict): - if utils.item(state_dict.get("decoder.version", torch.Tensor([1]))[0]) < 2: - # old models use incorrect weight norm dimension - for i, conv in enumerate(self.convolutions): - # reconfigure weight norm - nn.utils.remove_weight_norm(conv) - self.convolutions[i] = nn.utils.weight_norm(conv, dim=0) - state_dict["decoder.version"] = torch.Tensor([1]) - return state_dict - - def make_generation_fast_(self, need_attn=False, **kwargs): - self.need_attn = need_attn - - def _embed_tokens(self, tokens, incremental_state): - if incremental_state is not None: - # keep only the last token for incremental forward pass - tokens = 
tokens[:, -1:] - return self.embed_tokens(tokens) - - def _split_encoder_out(self, encoder_out, incremental_state): - """Split and transpose encoder outputs. - - This is cached when doing incremental inference. - """ - cached_result = utils.get_incremental_state( - self, incremental_state, "encoder_out" - ) - if cached_result is not None: - return cached_result - - # transpose only once to speed up attention layers - encoder_a, encoder_b = encoder_out - encoder_a = encoder_a.transpose(1, 2).contiguous() - result = (encoder_a, encoder_b) - - if incremental_state is not None: - utils.set_incremental_state(self, incremental_state, "encoder_out", result) - return result - - def _transpose_if_training(self, x, incremental_state): - if incremental_state is None: - x = x.transpose(0, 1) - return x - - -def extend_conv_spec(convolutions): - """ - Extends convolutional spec that is a list of tuples of 2 or 3 parameters - (kernel size, dim size and optionally how many layers behind to look for residual) - to default the residual propagation param if it is not specified - """ - extended = [] - for spec in convolutions: - if len(spec) == 3: - extended.append(spec) - elif len(spec) == 2: - extended.append(spec + (1,)) - else: - raise Exception( - "invalid number of parameters in convolution spec " - + str(spec) - + ". expected 2 or 3" - ) - return tuple(extended) - - -def Embedding(num_embeddings, embedding_dim, padding_idx): - m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) - nn.init.normal_(m.weight, 0, 0.1) - nn.init.constant_(m.weight[padding_idx], 0) - return m - - -def PositionalEmbedding(num_embeddings, embedding_dim, padding_idx): - m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx) - nn.init.normal_(m.weight, 0, 0.1) - nn.init.constant_(m.weight[padding_idx], 0) - return m - - -def Linear(in_features, out_features, dropout=0.0): - """Weight-normalized Linear layer (input: N x T x C)""" - m = nn.Linear(in_features, out_features) - nn.init.normal_(m.weight, mean=0, std=math.sqrt((1 - dropout) / in_features)) - nn.init.constant_(m.bias, 0) - return nn.utils.weight_norm(m) - - -def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0.0, **kwargs): - """Weight-normalized Conv1d layer optimized for decoding""" - m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs) - std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels)) - nn.init.normal_(m.weight, mean=0, std=std) - nn.init.constant_(m.bias, 0) - return nn.utils.weight_norm(m, dim=2) - - -def ConvTBC(in_channels, out_channels, kernel_size, dropout=0.0, **kwargs): - """Weight-normalized Conv1d layer""" - from fairseq.modules import ConvTBC - - m = ConvTBC(in_channels, out_channels, kernel_size, **kwargs) - std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels)) - nn.init.normal_(m.weight, mean=0, std=std) - nn.init.constant_(m.bias, 0) - return nn.utils.weight_norm(m, dim=2) - - -@register_model_architecture("fconv", "fconv") -def base_architecture(args): - args.dropout = getattr(args, "dropout", 0.1) - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) - args.encoder_embed_path = getattr(args, "encoder_embed_path", None) - args.encoder_layers = getattr(args, "encoder_layers", "[(512, 3)] * 20") - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) - args.decoder_embed_path = getattr(args, "decoder_embed_path", None) - args.decoder_layers = getattr(args, "decoder_layers", "[(512, 3)] * 20") - 
args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 256) - args.decoder_attention = getattr(args, "decoder_attention", "True") - args.share_input_output_embed = getattr(args, "share_input_output_embed", False) - - -@register_model_architecture("fconv", "fconv_iwslt_de_en") -def fconv_iwslt_de_en(args): - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256) - args.encoder_layers = getattr(args, "encoder_layers", "[(256, 3)] * 4") - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256) - args.decoder_layers = getattr(args, "decoder_layers", "[(256, 3)] * 3") - args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 256) - base_architecture(args) - - -@register_model_architecture("fconv", "fconv_wmt_en_ro") -def fconv_wmt_en_ro(args): - args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512) - base_architecture(args) - - -@register_model_architecture("fconv", "fconv_wmt_en_de") -def fconv_wmt_en_de(args): - convs = "[(512, 3)] * 9" # first 9 layers have 512 units - convs += " + [(1024, 3)] * 4" # next 4 layers have 1024 units - convs += " + [(2048, 1)] * 2" # final 2 layers use 1x1 convolutions - - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768) - args.encoder_layers = getattr(args, "encoder_layers", convs) - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 768) - args.decoder_layers = getattr(args, "decoder_layers", convs) - args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512) - base_architecture(args) - - -@register_model_architecture("fconv", "fconv_wmt_en_fr") -def fconv_wmt_en_fr(args): - convs = "[(512, 3)] * 6" # first 6 layers have 512 units - convs += " + [(768, 3)] * 4" # next 4 layers have 768 units - convs += " + [(1024, 3)] * 3" # next 3 layers have 1024 units - convs += " + [(2048, 1)] * 1" # next 1 layer uses 1x1 convolutions - convs += " + [(4096, 1)] * 1" # final 1 layer uses 1x1 convolutions - - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768) - args.encoder_layers = getattr(args, "encoder_layers", convs) - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 768) - args.decoder_layers = getattr(args, "decoder_layers", convs) - args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512) - base_architecture(args) diff --git a/spaces/mshukor/UnIVAL/fairseq/fairseq/scoring/bleu.py b/spaces/mshukor/UnIVAL/fairseq/fairseq/scoring/bleu.py deleted file mode 100644 index 97de5f966ec08e5a304c41358e67755c601622b7..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/fairseq/scoring/bleu.py +++ /dev/null @@ -1,167 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import ctypes -import math -import sys -from dataclasses import dataclass, field - -import torch -from fairseq.dataclass import FairseqDataclass -from fairseq.scoring import BaseScorer, register_scorer -from fairseq.scoring.tokenizer import EvaluationTokenizer - - -class BleuStat(ctypes.Structure): - _fields_ = [ - ("reflen", ctypes.c_size_t), - ("predlen", ctypes.c_size_t), - ("match1", ctypes.c_size_t), - ("count1", ctypes.c_size_t), - ("match2", ctypes.c_size_t), - ("count2", ctypes.c_size_t), - ("match3", ctypes.c_size_t), - ("count3", ctypes.c_size_t), - ("match4", ctypes.c_size_t), - ("count4", ctypes.c_size_t), - ] - - -@dataclass -class SacrebleuConfig(FairseqDataclass): - sacrebleu_tokenizer: EvaluationTokenizer.ALL_TOKENIZER_TYPES = field( - default="13a", metadata={"help": "tokenizer"} - ) - sacrebleu_lowercase: bool = field( - default=False, metadata={"help": "apply lowercasing"} - ) - sacrebleu_char_level: bool = field( - default=False, metadata={"help": "evaluate at character level"} - ) - - -@register_scorer("sacrebleu", dataclass=SacrebleuConfig) -class SacrebleuScorer(BaseScorer): - def __init__(self, cfg): - super(SacrebleuScorer, self).__init__(cfg) - import sacrebleu - - self.sacrebleu = sacrebleu - self.tokenizer = EvaluationTokenizer( - tokenizer_type=cfg.sacrebleu_tokenizer, - lowercase=cfg.sacrebleu_lowercase, - character_tokenization=cfg.sacrebleu_char_level, - ) - - def add_string(self, ref, pred): - self.ref.append(self.tokenizer.tokenize(ref)) - self.pred.append(self.tokenizer.tokenize(pred)) - - def score(self, order=4): - return self.result_string(order).score - - def result_string(self, order=4): - if order != 4: - raise NotImplementedError - # tokenization and lowercasing are performed by self.tokenizer instead. - return self.sacrebleu.corpus_bleu( - self.pred, [self.ref], tokenize="none" - ).format() - - -@dataclass -class BleuConfig(FairseqDataclass): - pad: int = field(default=1, metadata={"help": "padding index"}) - eos: int = field(default=2, metadata={"help": "eos index"}) - unk: int = field(default=3, metadata={"help": "unk index"}) - - -@register_scorer("bleu", dataclass=BleuConfig) -class Scorer(object): - def __init__(self, cfg): - self.stat = BleuStat() - self.pad = cfg.pad - self.eos = cfg.eos - self.unk = cfg.unk - - try: - from fairseq import libbleu - except ImportError as e: - sys.stderr.write( - "ERROR: missing libbleu.so. 
run `pip install --editable .`\n" - ) - raise e - - self.C = ctypes.cdll.LoadLibrary(libbleu.__file__) - - self.reset() - - def reset(self, one_init=False): - if one_init: - self.C.bleu_one_init(ctypes.byref(self.stat)) - else: - self.C.bleu_zero_init(ctypes.byref(self.stat)) - - def add(self, ref, pred): - if not isinstance(ref, torch.IntTensor): - raise TypeError("ref must be a torch.IntTensor (got {})".format(type(ref))) - if not isinstance(pred, torch.IntTensor): - raise TypeError("pred must be a torch.IntTensor(got {})".format(type(pred))) - - # don't match unknown words - rref = ref.clone() - assert not rref.lt(0).any() - rref[rref.eq(self.unk)] = -999 - - rref = rref.contiguous().view(-1) - pred = pred.contiguous().view(-1) - - self.C.bleu_add( - ctypes.byref(self.stat), - ctypes.c_size_t(rref.size(0)), - ctypes.c_void_p(rref.data_ptr()), - ctypes.c_size_t(pred.size(0)), - ctypes.c_void_p(pred.data_ptr()), - ctypes.c_int(self.pad), - ctypes.c_int(self.eos), - ) - - def score(self, order=4): - psum = sum( - math.log(p) if p > 0 else float("-Inf") for p in self.precision()[:order] - ) - return self.brevity() * math.exp(psum / order) * 100 - - def precision(self): - def ratio(a, b): - return a / b if b > 0 else 0 - - return [ - ratio(self.stat.match1, self.stat.count1), - ratio(self.stat.match2, self.stat.count2), - ratio(self.stat.match3, self.stat.count3), - ratio(self.stat.match4, self.stat.count4), - ] - - def brevity(self): - r = self.stat.reflen / self.stat.predlen - return min(1, math.exp(1 - r)) - - def result_string(self, order=4): - assert order <= 4, "BLEU scores for order > 4 aren't supported" - fmt = "BLEU{} = {:2.2f}, {:2.1f}" - for _ in range(1, order): - fmt += "/{:2.1f}" - fmt += " (BP={:.3f}, ratio={:.3f}, syslen={}, reflen={})" - bleup = [p * 100 for p in self.precision()[:order]] - return fmt.format( - order, - self.score(order=order), - *bleup, - self.brevity(), - self.stat.predlen / self.stat.reflen, - self.stat.predlen, - self.stat.reflen - ) diff --git a/spaces/mshukor/UnIVAL/run_scripts/caption/scaling_best/onlylinear/unival_video_caption_s1_onlylinear.sh b/spaces/mshukor/UnIVAL/run_scripts/caption/scaling_best/onlylinear/unival_video_caption_s1_onlylinear.sh deleted file mode 100644 index 223f39bb43e245fc9483a76c2618136e52ff9503..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/run_scripts/caption/scaling_best/onlylinear/unival_video_caption_s1_onlylinear.sh +++ /dev/null @@ -1,206 +0,0 @@ - - -# Number of GPUs per GPU worker -export GPUS_PER_NODE=8 -# Number of GPU workers, for single-worker training, please set to 1 -export NUM_NODES=$SLURM_NNODES -# The ip address of the rank-0 worker, for single-worker training, please set to localhost -master_addr=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1) -export MASTER_ADDR=$master_addr - -# The port for communication -export MASTER_PORT=12350 -# The rank of this worker, should be in {0, ..., WORKER_CNT-1}, for single-worker training, please set to 0 -export RANK=$SLURM_NODEID - -echo "MASTER_ADDR: $MASTER_ADDR" -echo "RANK :$RANK" -echo "NUM_NODES :$NUM_NODES" -echo "GPUS_PER_NODE :$GPUS_PER_NODE" - -export MIOPEN_USER_DB_PATH=/lus/home/NAT/gda2204/mshukor/.config/miopen_${MASTER_ADDR}_${SLURM_PROCID}/ - -echo "MIOPEN_USER_DB_PATH :$MIOPEN_USER_DB_PATH" - -num_workers=0 - - -exp_name=unival_video_caption_s1_onlylinear - - - -ofa_dir=/lus/home/NAT/gda2204/mshukor/code/unival -base_data_dir=/lus/scratch/NAT/gda2204/SHARED/data -base_log_dir=/work/NAT/gda2204/mshukor/logs - 
-save_base_log_dir=/lus/scratch/NAT/gda2204/SHARED/logs -save_dir=${save_base_log_dir}/ofa/checkpoints/caption/${exp_name} -log_dir=${save_dir} - -mkdir -p $log_dir $save_dir - -bpe_dir=${ofa_dir}/utils/BPE -user_dir=${ofa_dir}/ofa_module - - - -image_dir=${base_data_dir} - - -data_dir=${base_data_dir}/ofa/video_data/caption_data -data=${data_dir}/msrvtt_caption_train7k_1.tsv,${data_dir}/msrvtt_caption_train7k_2.tsv,${data_dir}/msrvtt_caption_train7k_3.tsv,${data_dir}/msrvtt_caption_train7k_4.tsv,${data_dir}/msrvtt_caption_train7k_5.tsv,${data_dir}/msrvtt_caption_train7k_6.tsv,${data_dir}/msrvtt_caption_train7k_7.tsv,${data_dir}/msrvtt_caption_train7k_8.tsv,${data_dir}/msrvtt_caption_train7k_9.tsv,${data_dir}/msrvtt_caption_train7k_10.tsv,${data_dir}/msrvtt_caption_test3k.tsv -eval_cider_cached=${data_dir}/cider_cached_tokens/msrvtt-test3k-words.p - - -restore_file=${base_log_dir}/ofa/checkpoints/pretrain/unival_s1/checkpoint15.pt - -selected_cols=0,4,2 - -task=video_caption -arch=unival_base -pretrained_model= - - -criterion=adjust_label_smoothed_encouraging_loss -label_smoothing=0.1 -lr=1e-3 -max_epoch=25 -warmup_ratio=0.06 -batch_size=16 -update_freq=2 -resnet_drop_path_rate=0.0 -encoder_drop_path_rate=0.1 -decoder_drop_path_rate=0.1 -dropout=0.1 -attention_dropout=0.0 -max_src_length=80 -max_tgt_length=20 -num_bins=1000 -# patch_image_size=480 -drop_worst_ratio=0.2 - - - - -### -image_encoder_name=timm_resnet #vit_base_patch16_224 -patch_image_size=480 -resnet_type=resnet101 - -resnet_model_path=${base_log_dir}/pretrained_models/resnet101-5d3b4d8f.pth - -# video -video_encoder_name=all_resnext101 -patch_frame_size=384 -video_model_path=${base_log_dir}/pretrained_models/3dcnn/resnext-101-kinetics.pth #${base_log_dir}/pretrained_models/TimeSformer_divST_8x32_224_K600.pyth -num_frames=16 - - -save_interval=1 -validate_interval_updates=2000 -save_interval_updates=0 - - -sample_patch_num='--sample-patch-num=784' # '' - -eval_args='--eval-args={"beam":5,"unnormalized":true,"temperature":1.0,"stop_on_max_len":true}' - -drop_worst_ratio=0.05 # modified from 0.2 for el -log_end=0.75 # for el -drop_best_ratio=0.05 -drop_best_after=6000 -drop_worst_after=6000 - -use_dataaug='--use-dataaug' - -for max_epoch in {$max_epoch,}; do - echo "max_epoch "${max_epoch} - for warmup_ratio in {0.06,}; do - echo "warmup_ratio "${warmup_ratio} - for drop_worst_after in {6000,}; do - echo "drop_worst_after "${drop_worst_after} - - log_file=${log_dir}/${max_epoch}"_"${warmup_ratio}"_"${drop_worst_after}".log" - save_path=${save_dir}/${max_epoch}"_"${warmup_ratio}"_"${drop_worst_after} - mkdir -p $save_path - - python3 -m torch.distributed.launch \ - --nnodes=${NUM_NODES} \ - --nproc_per_node=${GPUS_PER_NODE} \ - --master_port=${MASTER_PORT} \ - --node_rank=${RANK} \ - --master_addr=${MASTER_ADDR} \ - --use_env ${ofa_dir}/train.py \ - $data \ - --selected-cols=${selected_cols} \ - --bpe-dir=${bpe_dir} \ - --user-dir=${user_dir} \ - --restore-file=${restore_file} \ - --save-dir=${save_path} \ - --task=${task} \ - --arch=${arch} \ - --criterion=${criterion} \ - --label-smoothing=${label_smoothing} \ - --batch-size=${batch_size} \ - --update-freq=${update_freq} \ - --encoder-normalize-before \ - --decoder-normalize-before \ - --share-decoder-input-output-embed \ - --share-all-embeddings \ - --layernorm-embedding \ - --patch-layernorm-embedding \ - --code-layernorm-embedding \ - --resnet-drop-path-rate=${resnet_drop_path_rate} \ - --encoder-drop-path-rate=${encoder_drop_path_rate} \ - 
--decoder-drop-path-rate=${decoder_drop_path_rate} \ - --dropout=${dropout} \ - --attention-dropout=${attention_dropout} \ - --weight-decay=0.01 --optimizer=adam --adam-betas="(0.9,0.999)" --adam-eps=1e-08 --clip-norm=1.0 \ - --lr-scheduler=polynomial_decay --lr=${lr} \ - --max-epoch=${max_epoch} --warmup-ratio=${warmup_ratio} \ - --log-format=simple --log-interval=10 \ - --fixed-validation-seed=7 \ - --no-epoch-checkpoints --keep-best-checkpoints=1 \ - --save-interval=${save_interval} --validate-interval=1 \ - --save-interval-updates=${save_interval_updates} --validate-interval-updates=${validate_interval_updates} \ - --eval-cider \ - --eval-cider-cached-tokens=${eval_cider_cached} \ - --eval-args='{"beam":5,"max_len_b":16,"no_repeat_ngram_size":3}' \ - --best-checkpoint-metric=cider --maximize-best-checkpoint-metric \ - --max-src-length=${max_src_length} \ - --max-tgt-length=${max_tgt_length} \ - --find-unused-parameters \ - --freeze-encoder-embedding \ - --freeze-decoder-embedding \ - --add-type-embedding \ - --scale-attn \ - --scale-fc \ - --scale-heads \ - --disable-entangle \ - --num-bins=${num_bins} \ - --patch-image-size=${patch_image_size} \ - --drop-worst-ratio=${drop_worst_ratio} \ - --drop-worst-after=${drop_worst_after} \ - --fp16 \ - --fp16-scale-window=512 \ - --num-workers=0 \ - --image-encoder-name=${image_encoder_name} \ - --image-dir=${image_dir} \ - --video-encoder-name=${video_encoder_name} \ - --video-model-path=${video_model_path} \ - --patch-frame-size=${patch_frame_size} \ - ${sample_patch_num} \ - ${eval_args} \ - --num-frames=${num_frames} \ - --freeze-encoder \ - --freeze-decoder \ - --freeze-audio-encoder \ - --freeze-image-encoder \ - --freeze-video-encoder \ - --log-end ${log_end} --drop-best-ratio ${drop_best_ratio} --drop-best-after ${drop_best_after} \ - ${use_dataaug} \ - --reset-dataloader --reset-meters --reset-optimizer - - done - done -done \ No newline at end of file diff --git a/spaces/muchuam/anime-remove-background/app.py b/spaces/muchuam/anime-remove-background/app.py deleted file mode 100644 index 230a0d5f8a3da6ab18ecb8db1cd90016a489b96a..0000000000000000000000000000000000000000 --- a/spaces/muchuam/anime-remove-background/app.py +++ /dev/null @@ -1,52 +0,0 @@ -import gradio as gr -import huggingface_hub -import onnxruntime as rt -import numpy as np -import cv2 - - -def get_mask(img, s=1024): - img = (img / 255).astype(np.float32) - h, w = h0, w0 = img.shape[:-1] - h, w = (s, int(s * w / h)) if h > w else (int(s * h / w), s) - ph, pw = s - h, s - w - img_input = np.zeros([s, s, 3], dtype=np.float32) - img_input[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w] = cv2.resize(img, (w, h)) - img_input = np.transpose(img_input, (2, 0, 1)) - img_input = img_input[np.newaxis, :] - mask = rmbg_model.run(None, {'img': img_input})[0][0] - mask = np.transpose(mask, (1, 2, 0)) - mask = mask[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w] - mask = cv2.resize(mask, (w0, h0))[:, :, np.newaxis] - return mask - - -def rmbg_fn(img): - mask = get_mask(img) - img = (mask * img + 255 * (1 - mask)).astype(np.uint8) - mask = (mask * 255).astype(np.uint8) - img = np.concatenate([img, mask], axis=2, dtype=np.uint8) - mask = mask.repeat(3, axis=2) - return mask, img - - -if __name__ == "__main__": - providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] - model_path = huggingface_hub.hf_hub_download("skytnt/anime-seg", "isnetis.onnx") - rmbg_model = rt.InferenceSession(model_path, providers=providers) - app = gr.Blocks() - with app: - gr.Markdown("# Anime Remove Background\n\n" 
- "![visitor badge](https://visitor-badge.glitch.me/badge?page_id=skytnt.animeseg)\n\n" - "demo for [https://github.com/SkyTNT/anime-segmentation/](https://github.com/SkyTNT/anime-segmentation/)") - with gr.Row(): - with gr.Column(): - input_img = gr.Image(label="input image") - examples_data = [[f"examples/{x:02d}.jpg"] for x in range(1, 4)] - examples = gr.Dataset(components=[input_img], samples=examples_data) - run_btn = gr.Button(variant="primary") - output_mask = gr.Image(label="mask") - output_img = gr.Image(label="result", image_mode="RGBA") - examples.click(lambda x: x[0], [examples], [input_img]) - run_btn.click(rmbg_fn, [input_img], [output_mask, output_img]) - app.launch() diff --git a/spaces/muellerzr/accelerate-presentation/Accelerate_files/libs/revealjs/plugin/math/plugin.js b/spaces/muellerzr/accelerate-presentation/Accelerate_files/libs/revealjs/plugin/math/plugin.js deleted file mode 100644 index a92ccfb7cdc862f17973d52fd44085256f5f2c29..0000000000000000000000000000000000000000 --- a/spaces/muellerzr/accelerate-presentation/Accelerate_files/libs/revealjs/plugin/math/plugin.js +++ /dev/null @@ -1,15 +0,0 @@ -import {KaTeX} from "./katex"; -import {MathJax2} from "./mathjax2"; -import {MathJax3} from "./mathjax3"; - -const defaultTypesetter = MathJax2; - -/*! - * This plugin is a wrapper for the MathJax2, - * MathJax3 and KaTeX typesetter plugins. - */ -export default Plugin = Object.assign( defaultTypesetter(), { - KaTeX, - MathJax2, - MathJax3 -} ); \ No newline at end of file diff --git a/spaces/nateraw/cryptopunks-generator/app.py b/spaces/nateraw/cryptopunks-generator/app.py deleted file mode 100644 index 9516c2c604d47ef9e82739e78c3f0fa42eeaf10f..0000000000000000000000000000000000000000 --- a/spaces/nateraw/cryptopunks-generator/app.py +++ /dev/null @@ -1,97 +0,0 @@ -import subprocess -from pathlib import Path - -import einops -import gradio as gr -import numpy as np -import torch -from huggingface_hub import hf_hub_download -from PIL import Image -from torch import nn -from torchvision.utils import save_image - - -class Generator(nn.Module): - def __init__(self, nc=4, nz=100, ngf=64): - super(Generator, self).__init__() - self.network = nn.Sequential( - nn.ConvTranspose2d(nz, ngf * 4, 3, 1, 0, bias=False), - nn.BatchNorm2d(ngf * 4), - nn.ReLU(True), - nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False), - nn.BatchNorm2d(ngf * 2), - nn.ReLU(True), - nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 0, bias=False), - nn.BatchNorm2d(ngf), - nn.ReLU(True), - nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False), - nn.Tanh(), - ) - - def forward(self, input): - output = self.network(input) - return output - - -model = Generator() -weights_path = hf_hub_download('nateraw/cryptopunks-gan', 'generator.pth') -model.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))) - - -@torch.no_grad() -def interpolate(save_dir='./lerp/', frames=100, rows=8, cols=8): - save_dir = Path(save_dir) - save_dir.mkdir(exist_ok=True, parents=True) - - z1 = torch.randn(rows * cols, 100, 1, 1) - z2 = torch.randn(rows * cols, 100, 1, 1) - - zs = [] - for i in range(frames): - alpha = i / frames - z = (1 - alpha) * z1 + alpha * z2 - zs.append(z) - - zs += zs[::-1] # also go in reverse order to complete loop - - for i, z in enumerate(zs): - imgs = model(z) - - # normalize - imgs = (imgs + 1) / 2 - - imgs = (imgs.permute(0, 2, 3, 1).cpu().numpy() * 255).astype(np.uint8) - - # create grid - imgs = einops.rearrange(imgs, "(b1 b2) h w c -> (b1 h) (b2 w) c", b1=rows, b2=cols) - - 
Image.fromarray(imgs).save(save_dir / f"{i:03}.png") - - subprocess.call(f"convert -dispose previous -delay 10 -loop 0 {save_dir}/*.png out.gif".split()) - - -def predict(choice, seed): - torch.manual_seed(seed) - - if choice == 'interpolation': - interpolate() - return 'out.gif' - else: - z = torch.randn(64, 100, 1, 1) - punks = model(z) - save_image(punks, "punks.png", normalize=True) - return 'punks.png' - - -gr.Interface( - predict, - inputs=[ - gr.inputs.Dropdown(['image', 'interpolation'], label='Output Type'), - gr.inputs.Slider(label='Seed', minimum=0, maximum=1000, default=42), - ], - outputs="image", - title="Cryptopunks GAN", - description="These CryptoPunks do not exist. You have the choice of either generating random punks, or a gif showing the interpolation between two random punk grids.", - article="

    Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks | Github Repo

    ", - examples=[["interpolation", 123], ["interpolation", 42], ["image", 456], ["image", 42]], -).launch(cache_examples=True) diff --git a/spaces/nathanTQ/ChatDev/online_log/static/chain_visualizer.html b/spaces/nathanTQ/ChatDev/online_log/static/chain_visualizer.html deleted file mode 100644 index ee50449de5b621fd336e0e11854e2db274ad8999..0000000000000000000000000000000000000000 --- a/spaces/nathanTQ/ChatDev/online_log/static/chain_visualizer.html +++ /dev/null @@ -1,131 +0,0 @@ - - - - - ChatChain Visualizer - - - - - -
    -

    ChatChain Visualizer

    -

    Select your ChatChainConfig.json to visualize

    - -
    -
    - - - - - \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/AspenTech Aspen Exchanger Design Rating 7.3.rar _HOT_.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/AspenTech Aspen Exchanger Design Rating 7.3.rar _HOT_.md deleted file mode 100644 index 2a8e60356f3fea36152f8fe25f2facef7f192763..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/AspenTech Aspen Exchanger Design Rating 7.3.rar _HOT_.md +++ /dev/null @@ -1,20 +0,0 @@ - -

    How to Optimize Heat Exchanger Design with AspenTech Aspen Exchanger Design Rating 7.3.rar

    -

    Heat exchangers are critical components of many industrial processes, such as chemical production, oil refining, power generation and more. They can also account for a significant portion of the capital and operational costs of a plant. Therefore, it is important to design heat exchangers that are efficient, reliable and cost-effective.

    -

    AspenTech Aspen Exchanger Design Rating 7.3.rar


Download: https://urlcod.com/2uIauC



    -

    One of the tools that can help engineers achieve these goals is AspenTech Aspen Exchanger Design Rating 7.3.rar, a software package that integrates rigorous process models with the industry's most comprehensive heat exchanger modeling capabilities. With this software, engineers can:

    -
      -
    • Design and rate all major types of heat exchangers, including shell and tube, air-cooled, plate, plate-fin, coil-wound and fired heaters.
    • -
    • Access the latest research and standards from Aspen HTFS®, the world's leading authority on heat transfer research.
    • -
    • Leverage the best-in-class physical properties methods and data from Aspen Properties®, which covers over 37,000 components and 127 property packages.
    • -
    • Optimize heat exchanger performance and minimize capital and energy costs by integrating with Aspen Plus® and Aspen HYSYS®, the leading process simulation software.
    • -
    • Improve engineering efficiency and quality by using a common user interface and seamless data transfer across different exchanger types.
    • -
    -

    AspenTech Aspen Exchanger Design Rating 7.3.rar is a powerful tool that can help engineers design optimal heat exchangers for any process application. To learn more about this software and how to download it, visit https://www.aspentech.com/en/products/engineering/aspen-exchanger-design-and-rating.

    - -

    Heat exchangers are devices that transfer heat between two or more fluids at different temperatures. They can be used for heating, cooling, condensing, evaporating or recovering heat from various process streams. Heat exchangers play a vital role in improving the efficiency and sustainability of many industrial processes.

    -

    However, designing heat exchangers is not a simple task. Engineers have to consider many factors, such as the type and properties of the fluids, the heat transfer coefficients, the pressure drops, the fouling factors, the mechanical design and the fabrication costs. Moreover, engineers have to comply with the relevant codes and standards that govern the design and operation of heat exchangers.
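To make those trade-offs concrete, here is a minimal sizing sketch based on the standard design equation Q = U · A · ΔT_lm (log-mean temperature difference method). Every number is an assumed example value for illustration only; none of it comes from, or is produced by, the Aspen software.

```python
import math

# Illustrative counter-current exchanger sizing with Q = U * A * dT_lm.
# All values below are assumed examples, not real plant or Aspen data.
Q = 500_000.0        # heat duty in W
U = 800.0            # overall heat-transfer coefficient in W/(m^2*K)

# Hot stream: 150 C -> 90 C, cold stream: 30 C -> 80 C (counter-current).
dT1 = 150.0 - 80.0   # temperature approach at one end
dT2 = 90.0 - 30.0    # temperature approach at the other end

dT_lm = (dT1 - dT2) / math.log(dT1 / dT2)   # log-mean temperature difference
A = Q / (U * dT_lm)                          # required heat-transfer area

print(f"LMTD = {dT_lm:.1f} K, required area = {A:.1f} m^2")
```

A dedicated tool automates this kind of calculation while also accounting for fouling factors, pressure drops and mechanical constraints.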

    -

    -

    That is why engineers need a reliable and comprehensive software tool that can help them design and rate heat exchangers with confidence and accuracy. AspenTech Aspen Exchanger Design Rating 7.3.rar is such a tool. It combines the expertise and experience of AspenTech, the leader in process optimization software, with the knowledge and innovation of Aspen HTFS, the leader in heat transfer research.

    e93f5a0c3f
    -
    -
    \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/CCCAM GENERATOR.rarl.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/CCCAM GENERATOR.rarl.md deleted file mode 100644 index c82529750b5b671b8bfb9fab69e36ccfeea069d8..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/CCCAM GENERATOR.rarl.md +++ /dev/null @@ -1,35 +0,0 @@ - -

    How to Use CCCAM GENERATOR.rarl to Get Free CCcam Server Test

    -

CCCAM GENERATOR.rarl is a file that contains a list of free CCcam server tests that you can use to watch your favorite channels on your satellite receiver. CCcam is a protocol that allows you to share a subscription card with other users over the internet. With CCCAM GENERATOR.rarl, you don't need to buy a subscription card; you just need to download the file and extract it with a program like WinRAR.

    -

    CCCAM GENERATOR.rarl


    Download File >>> https://urlcod.com/2uIaI6



    -

In this article, we will show you how to use CCCAM GENERATOR.rarl to get a free CCcam server test and enjoy watching your favorite channels without paying anything.

    -

    Step 1: Download CCCAM GENERATOR.rarl

    -

    The first step is to download CCCAM GENERATOR.rarl from a reliable source. You can find many websites that offer this file for free, such as CCcamFrei, CCcam.net, CCcamsate.com, or HeyLink.me.[^1^] [^2^] [^3^] [^4^]

    -

    Make sure you download the latest version of the file, as it is updated regularly with new and working CCcam server tests. You can check the date of the last update on the website or on the file name.

    -

    Step 2: Extract CCCAM GENERATOR.rarl

    -

The second step is to extract CCCAM GENERATOR.rarl using a program like WinRAR, which you can download from the official WinRAR website. After installing WinRAR, right-click on CCCAM GENERATOR.rarl and choose "Extract Here". You will get a folder with several files inside.

    -

    One of the files is called "CCCAM.CFG". This is the file that contains the free CCcam server tests. You can open it with a text editor like Notepad and see the list of CCcam server tests. Each test has a format like this:

    -

    -
    C: server port username password
    -

    For example:

    -
    C: free.cccam.net 26708 g0D5BoJw cccam.net
    -

    Step 3: Upload CCCAM.CFG to your satellite receiver

    -

    The third step is to upload CCCAM.CFG to your satellite receiver using a USB flash drive or an FTP client. The method may vary depending on your receiver model, but generally you need to do the following:

    -
      -
    • Copy CCCAM.CFG to your USB flash drive and insert it into your receiver.
    • -
    • Go to Menu > Network > CCcam Client Setup > Update Files by USB.
    • -
    • Select CCCAM.CFG and press OK.
    • -
    • Wait for the message "Update Success" and press Exit.
    • -
    -

Alternatively, you can use an FTP client like FileZilla to upload CCCAM.CFG to your receiver. You need to know the IP address, username and password of your receiver; you can find them in Menu > Network > Network Setup. Then do the following (a scripted alternative using Python's ftplib is sketched after these steps):

    -
      -
    • Open FileZilla and enter the IP address, username and password of your receiver in the Host, Username and Password fields respectively.
    • -
    • Click on Quickconnect and wait for the connection to be established.
    • -
    • Navigate to the folder where you want to upload CCCAM.CFG. Usually it is /var/etc/ or /etc/.
    • -
    • Drag and drop CCCAM.CFG from your computer to the folder on your receiver.
    • -
    • Wait for the transfer to be completed and close FileZilla.
    • -
    -
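If you prefer to script this step instead of using FileZilla, the rough Python sketch below shows both ideas from this article: reading the C: lines out of CCCAM.CFG (using the server/port/username/password format described above) and copying the file to the receiver with the standard ftplib module. The receiver address, login and target directory are placeholders that you would replace with your own values.

```python
from ftplib import FTP

def read_clines(path="CCCAM.CFG"):
    """Parse lines of the form 'C: server port username password'."""
    clines = []
    with open(path, encoding="utf-8", errors="ignore") as f:
        for line in f:
            parts = line.split()
            if len(parts) == 5 and parts[0] == "C:":
                server, port, user, password = parts[1:]
                clines.append({"server": server, "port": int(port),
                               "user": user, "password": password})
    return clines

def upload_config(host="192.168.1.50", user="root", password="root",
                  local_path="CCCAM.CFG", remote_dir="/var/etc"):
    """Upload CCCAM.CFG to the receiver over FTP (placeholder credentials)."""
    with FTP(host) as ftp:
        ftp.login(user, password)
        ftp.cwd(remote_dir)
        with open(local_path, "rb") as f:
            ftp.storbinary("STOR CCCAM.CFG", f)

if __name__ == "__main__":
    print(read_clines())   # show the parsed server entries
    # upload_config()      # uncomment after filling in your receiver details
```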

    Step 4: Activate CCcam on your satellite receiver

    -

    The final step is to activate CCcam on your satellite receiver and start watching your favorite channels. You need to do the following:

    e93f5a0c3f
    -
    -
    \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/CorelDRAW Graphics Suite X5 V15.0.0409 [REPACK] Download Pc.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/CorelDRAW Graphics Suite X5 V15.0.0409 [REPACK] Download Pc.md deleted file mode 100644 index b8310e8729797714845469ab80a053e2e67636d0..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/CorelDRAW Graphics Suite X5 V15.0.0409 [REPACK] Download Pc.md +++ /dev/null @@ -1,23 +0,0 @@ -
    -

    Why You Should Download CorelDRAW Graphics Suite X5 V15.0.0409 for Your PC

    -

    If you are looking for a powerful and versatile graphic design software that can help you create stunning designs for any media, you should consider downloading CorelDRAW Graphics Suite X5 V15.0.0409 for your PC. This software package includes multiple applications that can help you with vector illustration, photo editing, bitmap to vector conversion, screen capture, and content organization.

    -

    CorelDRAW Graphics Suite X5 V15.0.0409 was released in 2010 and it was one of the biggest upgrades in recent years. It introduced Corel CONNECT, a built-in content organizer that lets you store content in a digital tray that is synced between CorelDRAW and Corel PHOTO-PAINT, giving you instant access across multiple platforms. It also revamped and redesigned the color management engine, allowing for color consistency across platforms and support for PANTONE color profiles. This was a great feature for print designers, but web designers were not left behind. CorelDRAW Graphics Suite X5 V15.0.0409 also included a Pixels mode and a web animation tool that let you create designs in CorelDRAW and animate them in SWiSH miniMax 2.

    -

    CorelDRAW Graphics Suite X5 V15.0.0409 Download Pc


    DOWNLOAD →→→ https://urlcod.com/2uIbGv



    -

    CorelDRAW Graphics Suite X5 V15.0.0409 also came with over 1,000 professional fonts and thousands of royalty-free clipart images and photos that you can use for your projects. It also had built-in learning tools and video tutorials that helped you get started quickly and easily. The system requirements for this software were not very demanding, as it could run on Windows 7, Vista, or XP with at least 512 MB of RAM and 750 MB of hard disk space.

    -

    If you want to download CorelDRAW Graphics Suite X5 V15.0.0409 for your PC, you can find it on various websites that offer free software downloads. However, you should be careful about the source and the quality of the download, as some websites may contain viruses or malware that can harm your PC. You should also check the compatibility and the license of the software before installing it on your PC.

    -

    A better option would be to subscribe to the latest version of CorelDRAW Graphics Suite, which gives you access to exclusive features and content that are not available in older versions. You can also enjoy cloud-based collaboration and asset management workflows that make it faster and easier to work with teams and clients. You can also save money with a flexible and affordable subscription plan that lets you pay only for what you need.

    -

    Whether you choose to download CorelDRAW Graphics Suite X5 V15.0.0409 or subscribe to the newest version, you will be able to unleash your creativity and productivity with one of the best graphic design software packages in the market.

    - -

    CorelDRAW Graphics Suite X5 V15.0.0409 has many features and tools that can help you create amazing designs for any media. Here are some of the highlights of this software package:

    -

    -
      -
    • CorelDRAW X5: This is the main application for vector illustration and page layout. You can use it to create logos, signs, flyers, brochures, posters, banners, and more. You can also use it to draw shapes, curves, lines, and text with precision and control. You can also apply effects, styles, fills, and outlines to your objects.
    • -
    • Corel PHOTO-PAINT X5: This is the application for editing and retouching photos. You can use it to crop, resize, rotate, and enhance your images. You can also use it to remove unwanted elements, adjust colors, brightness, contrast, and sharpness. You can also apply filters, effects, and masks to your photos.
    • -
    • Corel PowerTRACE X5: This is the application for converting bitmaps to vectors. You can use it to trace scanned images, logos, sketches, or photos and turn them into editable vector graphics. You can also use it to smooth jagged edges, reduce the number of colors, and optimize the curves.
    • -
    • Corel CAPTURE X5: This is the application for screen capture. You can use it to capture any part of your screen as an image or a video. You can also use it to record your voice and mouse movements. You can then edit and save your captures in various formats.
    • -
    • Corel CONNECT: This is the application for content organization. You can use it to browse and search for content on your computer, network, or online sources. You can also use it to store content in a digital tray that is synced between CorelDRAW and Corel PHOTO-PAINT. You can then drag and drop content from the tray to your documents.
    • -
    -

    With CorelDRAW Graphics Suite X5 V15.0.0409, you can create stunning designs for any media with ease and confidence. However, if you want to enjoy the latest and greatest features and content from CorelDRAW Graphics Suite, you should consider subscribing to the newest version. You will get access to exclusive features and content that are not available in older versions. You will also get cloud-based collaboration and asset management workflows that make it faster and easier to work with teams and clients. You will also save money with a flexible and affordable subscription plan that lets you pay only for what you need.

    -

    If you are interested in subscribing to the latest version of CorelDRAW Graphics Suite, you can download a free trial from the official website and see the improved features for yourself. You can also compare the different subscription plans and choose the one that suits your needs and budget.

    7b8c122e87
    -
    -
    \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Cyber Cafe Crack Free Download.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Cyber Cafe Crack Free Download.md deleted file mode 100644 index 2c2a107cd81d2bab4708b7dbfd23dd0a654a5bcf..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Cyber Cafe Crack Free Download.md +++ /dev/null @@ -1,34 +0,0 @@ -
    -

    How to Download Cyber Cafe Pro Crack for Free

    -

If you are looking for a way to manage your Internet cafe business, you might have heard of Cyber Cafe Pro, a powerful and user-friendly software package that helps you monitor and control up to 250 client computers. Cyber Cafe Pro offers many features, such as pricing options, time codes, account management, reservations, and advanced admin settings. However, Cyber Cafe Pro is not free software, and you need to pay a license fee to use it.

    -

    Cyber Cafe Crack Free Download


    DOWNLOAD ☆☆☆ https://urlcod.com/2uIbrY



    -

    Fortunately, there is a way to download Cyber Cafe Pro crack for free, which allows you to bypass the activation process and use the full version of the software without paying anything. In this article, we will show you how to download Cyber Cafe Pro crack for free and how to install it on your server and client computers.

    -

    How to Download Cyber Cafe Pro Crack for Free

    -

    The first step to download Cyber Cafe Pro crack for free is to find a reliable source that offers the cracked version of the software. There are many websites that claim to provide Cyber Cafe Pro crack for free, but some of them may contain viruses, malware, or spyware that can harm your computer or steal your data. Therefore, you need to be careful and choose a trustworthy website that has positive reviews and feedback from other users.

    -

    One of the websites that we recommend is Get Into PC, which is a popular and reputable website that offers free downloads of various software, including Cyber Cafe Pro. You can visit their website at https://getintopc.com/softwares/network/cyber-cafe-pro-free-download/ and click on the Download button to start downloading Cyber Cafe Pro crack for free.

    -

    -

    The file size is about 13 MB, and it will take a few minutes to complete the download depending on your Internet speed. Once the download is finished, you will get a ZIP file that contains the setup file and the crack file of Cyber Cafe Pro.

    -

    How to Install Cyber Cafe Pro Crack on Your Server Computer

    -

    The next step is to install Cyber Cafe Pro crack on your server computer, which is the main computer that controls all the client computers in your Internet cafe. To do this, you need to follow these steps:

    -
      -
    1. Extract the ZIP file that you downloaded from Get Into PC using a program like WinRAR or 7-Zip.
    2. -
    3. Run the setup file named ccpserver.exe as an administrator.
    4. -
    5. Follow the instructions on the screen to complete the installation process. You can choose the default settings or customize them according to your preferences.
    6. -
    7. When the installation is done, do not launch the program yet.
    8. -
    9. Copy the crack file named ccpserver.exe from the Crack folder and paste it into the installation folder of Cyber Cafe Pro, which is usually located at C:\Program Files (x86)\CyberCafePro Server.
    10. -
    11. Replace the original file when prompted.
    12. -
    13. Now you can launch Cyber Cafe Pro from the desktop shortcut or the Start menu.
    14. -
    15. You will see a configuration wizard that will guide you through the initial setup of your Internet cafe. You can enable or disable various options, such as Play & Pay mode, pricing options, local taxes, master password, etc.
    16. -
    17. After completing the configuration wizard, you will see the main interface of Cyber Cafe Pro, where you can monitor and manage all your client computers.
    18. -
    -

    How to Install Cyber Cafe Pro Crack on Your Client Computers

    -

    The final step is to install Cyber Cafe Pro crack on your client computers, which are the computers that your customers use in your Internet cafe. To do this, you need to follow these steps:

    -
      -
    1. Copy the setup file named ccpclient.exe from the ZIP file that you downloaded from Get Into PC onto a USB flash drive or any other removable media.
    2. -
    3. Insert the USB flash drive into each client computer that you want to install Cyber Cafe Pro on.
    4. -
    5. Run the setup file named ccpclient.exe as an administrator on each client computer.
    6. -
    7. Follow the instructions on the screen to complete the installation process. You can choose the default settings or customize them according to your preferences.
    8. -
    9. When the installation is done, do not launch the program yet.
    10. -
    11. Copy the crack file named ccpclient.exe from the Crack folder and paste it into the installation folder of Cyber Cafe Pro on each client computer, which is usually located at C:\Program Files (x86)\CyberCafePro Client. 81aa517590
      -
      -
      \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/FORMS2XML UTILITY DOWNLOADl.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/FORMS2XML UTILITY DOWNLOADl.md deleted file mode 100644 index 2832541d18724ae9c3f386369cc137cab997a832..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/FORMS2XML UTILITY DOWNLOADl.md +++ /dev/null @@ -1,35 +0,0 @@ -
      -

      How to Download and Use the Forms2XML Utility

      -

      If you want to convert Oracle Forms modules to XML files, you need to use the Forms2XML utility. This utility is a tool that can convert FormModules, ObjectLibraries, or MenuModules to Extensible Markup Language (XML) format. You can then use these XML files to migrate your Oracle Forms applications to Oracle Application Express.

      -

      But how can you download and use the Forms2XML utility? In this article, we will show you the steps to do that.

      -

      FORMS2XML UTILITY DOWNLOADl


Download: https://urlcod.com/2uIawg



      -

      Step 1: Download the Forms2XML Utility

      -

      The Forms2XML utility is part of the Oracle Developer Suite, which includes Oracle Forms. You can download the Oracle Developer Suite from the Oracle website. You can choose either Oracle Developer Suite 9i or 10g, as both versions include the Forms2XML utility. However, if you have Oracle Forms modules from earlier releases, you may need to upgrade them to 9i or 10g before converting them to XML.

      -

      After downloading the Oracle Developer Suite, you need to install it on your computer. Follow the installation instructions on the Oracle website or in the documentation.

      -

      Step 2: Run the Forms2XML Utility

      -

      Once you have installed the Oracle Developer Suite, you can run the Forms2XML utility from a command line or from a Java program. The utility takes one or more Oracle Forms files as arguments and produces XML files with the same base name and an .xml extension. The extension _fmb, _mmb, or _olb is added to indicate whether the original file was a FormModule, a MenuModule, or an ObjectLibrary.

      -

      The syntax for running the Forms2XML utility from a command line is:

      -
      frmf2xml [options] file1 [ file2 ...]
      -java oracle.forms.util.xmltools.Forms2XML [options] file1 [ file2 ...]
      -
      -

      The options are:

      -
        -
      • USE_PROPERTY_IDS=YES/NO: Setting this option to YES causes the utility to write the internal ID for Real or Character into the XML file. The default value is NO.
      • -
      • OVERWRITE=YES/NO: Setting this option to YES causes the utility to overwrite any existing XML files with the same name in the output directory. The default value is NO.
      • -
      -

      Note: The Forms2XML utility must generate an XML file in English only. If the generated XML tags are not in English, then the file will fail to load. Also, the conversion of synonym-based data blocks is not supported. Only data blocks based on tables or views are supported by the conversion process.

      -

      The syntax for running the Forms2XML utility from a Java program is:

      -
      import oracle.forms.util.xmltools.Forms2XML;
      -...
      -Forms2XML converter = new Forms2XML();
      -converter.convertFormsFile("file1.fmb", "file1_fmb.xml");
      -converter.convertFormsFile("file2.mmb", "file2_mmb.xml");
      -...
      -
      -

      You can also use other methods of the Forms2XML class to set options or get information about the conversion process. See the documentation for more details.
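If you would rather drive the command-line form of the utility from a script than call the Java class directly, a small wrapper like the sketch below is one option. It relies only on the frmf2xml syntax and the USE_PROPERTY_IDS/OVERWRITE options shown above, and it assumes frmf2xml is on your PATH (it ships with Oracle Developer Suite 9i/10g).

```python
import subprocess

def run_forms2xml(files, use_property_ids=False, overwrite=True):
    # Build the documented command line: frmf2xml [options] file1 [file2 ...]
    cmd = [
        "frmf2xml",
        f"USE_PROPERTY_IDS={'YES' if use_property_ids else 'NO'}",
        f"OVERWRITE={'YES' if overwrite else 'NO'}",
        *files,
    ]
    # check=True raises CalledProcessError if the conversion fails.
    subprocess.run(cmd, check=True)

if __name__ == "__main__":
    # Produces file1_fmb.xml and file2_mmb.xml next to the input modules.
    run_forms2xml(["file1.fmb", "file2.mmb"], overwrite=True)
```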

      -

      -

      Step 3: Use the XML Files for Migration

      -

      After converting your Oracle Forms files to XML files, you can use them for migrating your applications to Oracle Application Express. You need to create a workspace and add users in Oracle Application Express, upload your database objects into your schema, create a conversion project, review and edit your forms metadata, and generate your application. You can find more information about these steps in this guide.

      -

      We hope this article helped you learn how to download and use the Forms2XML utility. If you have any questions or feedback, please let us know.

      7b8c122e87
      -
      -
      \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Full !!BETTER!! Jazler RadioStar 2.2.30 [full !!BETTER!!][Multilenguaje].md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Full !!BETTER!! Jazler RadioStar 2.2.30 [full !!BETTER!!][Multilenguaje].md deleted file mode 100644 index 732099aa607626c2db0a482d20d19b00c0417060..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Full !!BETTER!! Jazler RadioStar 2.2.30 [full !!BETTER!!][Multilenguaje].md +++ /dev/null @@ -1,80 +0,0 @@ -## FULL Jazler RadioStar 2.2.30 [Full][Multilenguaje] - - - - - - - - - -**LINK ►►► [https://hyabrimhyfit.blogspot.com/?c=2txCmN](https://hyabrimhyfit.blogspot.com/?c=2txCmN)** - - - - - - - - - - - - - -# FULL Jazler RadioStar 2.2.30: The Ultimate Radio Automation Software - - - -If you are looking for a radio automation software that can handle all your needs, look no further than FULL Jazler RadioStar 2.2.30. This software has all the features you need to run a professional radio station, with an easy to use user interface, automated features to save time, and unlimited workstations to support your production, marketing and sales departments. - - - -FULL Jazler RadioStar 2.2.30 is available in multiple languages, including English, Spanish, French, German, Italian, Portuguese, Greek and more. You can customize the software to suit your preferences and needs, and enjoy the benefits of a reliable and stable radio automation system. - - - -## What can FULL Jazler RadioStar 2.2.30 do for you? - - - -Here are some of the features that make FULL Jazler RadioStar 2.2.30 stand out from other radio automation software: - - - -- **Songs database with advanced editing:** You can easily edit your music database, organize and categorize your music collection, add or edit tags, cue points, fade points, intros and outros[^1^]. You can also import music from CDs, MP3s or other sources[^4^]. - -- **Automated playlists based on a clock:** You can create automated playlists based on a clock that defines the music genres, jingles, commercials and other elements that will play at specific times[^4^]. You can also preview where commercials will interrupt your playlist in the on-air rotation[^1^]. - -- **Voicetracking:** You can add or record voicetracks and include them for specific playlists without saving them in the database[^1^]. You can also use the built-in voice recorder to record voice messages or announcements[^4^]. - -- **Remote directories:** You can read the remote directories while being online, and refresh your radio content even if you're only on the internet (no internet connection needed)[^3^]. You can also synchronize the tags of the music files of the shared music while being online[^3^]. - -- **Local network environment:** You can work perfectly in a local network environment to provide unlimited workstations to the production, marketing and sales departments[^1^] [^4^]. You can also share music files, playlists, commercials and other elements among different workstations[^4^]. - -- **And much more:** FULL Jazler RadioStar 2.2.30 also offers features such as live assist mode, instant jingles, sweepers, music scheduling, commercials scheduling, reports and statistics, webcasting support, backup system and more[^4^]. - - - -## How to get FULL Jazler RadioStar 2.2.30? - - - -If you are interested in getting FULL Jazler RadioStar 2.2.30 [Full][Multilenguaje], you can download it from one of these sources: - - - -1. 
[Uloz.to Disk](https://ulozto.net/file/sf1tUyaP5/jazler-radiostar-2-2-30-full-multilenguaje-www-zonatorrent-com-rar?redirected=1): This is a file sharing service that offers fast and secure downloads[^1^]. The file size is 44 MB. - -2. [My Iptv Forum](http://myiptvforum.com/resources/jazler-radiostar-2-2-30-full-multilenguaje.3938/): This is a forum for IPTV enthusiasts that offers various resources[^2^]. The file size is not specified. - -3. [New C Mi Com](https://new.c.mi.com/ng/post/74167/EXCLUSIVE_Full_Jazler_RadioStar_2230_EXCLUSIVE_Ful): This is a website for Xiaomi fans that offers exclusive content[ 1b8d091108 - - - - - - - - - diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Prestopagemanager7downloadcracked TOP.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Prestopagemanager7downloadcracked TOP.md deleted file mode 100644 index aa4654c1eb651529639e48536fc76101295fa998..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Prestopagemanager7downloadcracked TOP.md +++ /dev/null @@ -1,19 +0,0 @@ - -

      How to use PrestoPageManager 7 ED to scan and organize your documents

      -

PrestoPageManager 7 ED is a software package that allows you to scan, edit, organize, and share your documents in various formats. It is compatible with Windows and Mac OS X operating systems and supports a wide range of scanners and multifunction printers. In this article, we will show you how to use PrestoPageManager 7 ED to scan and organize your documents in a few simple steps.

      -

      Step 1: Install PrestoPageManager 7 ED

      -

      To install PrestoPageManager 7 ED, you need to have the installation CD or download the software from the official website. Follow the instructions on the screen to complete the installation process. You may need to restart your computer after the installation.

      -

      prestopagemanager7downloadcracked


Download File: https://urlcod.com/2uIaST



      -

      Step 2: Launch PrestoPageManager 7 ED

      -

      To launch PrestoPageManager 7 ED, you can either double-click on the desktop icon or select it from the Start menu (Windows) or the Applications folder (Mac OS X). You will see the main interface of PrestoPageManager 7 ED, which consists of four main parts: the toolbar, the folder pane, the thumbnail pane and the preview pane.

      -

      Step 3: Scan your documents

      -

      To scan your documents, you need to connect your scanner or multifunction printer to your computer and turn it on. Then, click on the Scan button on the toolbar or select File > Scan from the menu. You will see a dialog box where you can choose your scanner model, scanning mode, resolution, color mode and other settings. You can also preview your scanned image before saving it. Click on the Scan button to start scanning your document. The scanned image will appear in the thumbnail pane and the preview pane.

      -

      Step 4: Edit your documents

      -

      To edit your documents, you can use the tools on the toolbar or select Edit from the menu. You can rotate, crop, resize, adjust brightness and contrast, remove red-eye, add text and annotations, and apply filters and effects to your scanned image. You can also use the OCR (optical character recognition) function to convert your scanned image into editable text. To do this, click on the OCR button on the toolbar or select Edit > OCR from the menu. You will see a dialog box where you can choose your language, output format and other settings. Click on the OCR button to start converting your scanned image into text. The text will appear in a new window where you can edit it further.

      -

      Step 5: Organize your documents

      -

      To organize your documents, you can use the folder pane and the thumbnail pane. You can create folders and subfolders to store your scanned images and text files. You can also rename, move, copy, delete and sort your files by name, date, size or type. You can also use the stack function to group related files together. To do this, select the files you want to stack and click on the Stack button on the toolbar or select File > Stack from the menu. You will see a new file with a stack icon in the thumbnail pane. You can double-click on it to expand or collapse it.

      -

      Step 6: Share your documents

      -

      To share your documents, you can use the share function on the toolbar or select File > Share from the menu. You can choose to share your files via email, cloud storage services (such as Dropbox or Google Drive), social media platforms (such as Facebook or Twitter), or print them out. You can also convert your files into PDF or other formats before sharing them.

      -

PrestoPageManager 7 ED is a powerful and easy-to-use application that helps you scan and organize your documents conveniently. You can download a free trial version from https://www.newsoftinc.com/product/prestopagemanager-7-ed/ and try it out for yourself.

      7b8c122e87
      -
      -
      \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Reclock Pot Player 64-bit 11 REPACK.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Reclock Pot Player 64-bit 11 REPACK.md deleted file mode 100644 index 69e2f8feea10ed66e9821ea36d254a67bc2ec0ac..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Reclock Pot Player 64-bit 11 REPACK.md +++ /dev/null @@ -1,34 +0,0 @@ -
      -

      How to Use ReClock with PotPlayer 64-bit 11

      -

      ReClock is a DirectShow filter that can synchronize the audio and video streams of a movie file, reduce jitter and audio glitches, and improve the playback quality. PotPlayer is a versatile and powerful media player that supports a variety of formats and codecs. In this article, we will show you how to use ReClock with PotPlayer 64-bit 11 on Windows.

      -

      reclock pot player 64-bit 11


      Download ->>->>->> https://urlcod.com/2uI9N1



      -

      Step 1: Download and install ReClock

      -

You can download ReClock from its official release page. The latest version as of April 2023 is 1.9.0.7 Beta. You need to install both the 32-bit and 64-bit versions of ReClock, as PotPlayer can switch between them depending on the file type. To install ReClock, run the installer and follow the instructions. You may need to reboot your computer after the installation.

      -

      Step 2: Download and install PotPlayer

      -

You can download PotPlayer from its official website. The latest version as of April 2023 is 2.0.0317. To install PotPlayer, run the installer and follow the instructions. You can customize the installation options according to your preferences.

      -

      Step 3: Configure ReClock

      -

      To configure ReClock, right-click on its icon in the system tray and select Settings. You will see a window with several tabs. Here are some recommended settings:

      -
        -
      • On the General tab, check "Enable ReClock for video renderers" and "Enable ReClock for audio renderers".
      • -
      • On the Audio tab, select your audio device and output format. You can also adjust the volume and dynamic range compression.
      • -
      • On the Video tab, check "Enable video clock" and "Use video clock for audio clock". You can also adjust the video clock speed and tolerance.
      • -
      • On the Sync tab, check "Enable sync correction" and "Use sync correction for audio clock". You can also adjust the sync correction mode and threshold.
      • -
      • On the Advanced tab, you can fine-tune some settings such as buffer size, sample rate conversion, and logging.
      • -
      -

      You can also access the ReClock settings from PotPlayer by right-clicking on the video window and selecting Filters > ReClock.

      -

      Step 4: Configure PotPlayer

      -

      To configure PotPlayer, right-click on its icon in the system tray and select Preferences. You will see a window with several categories. Here are some recommended settings:

      -
        -
      • On the General > Filter Control category, check "Prefer external filters" and "Use system default filter merit".
      • -
      • On the Filter Priority > External Filter category, click on Add Filter and select ReClock from the list. Then click on Prefer.
      • -
      • On the Playback > Video Renderer category, select your preferred video renderer. You can also adjust some settings such as deinterlacing, color management, and subtitles.
      • -
      • On the Playback > Audio Renderer category, select ReClock as your audio renderer.
      • -
      • On the Audio > Resample category, check "Use built-in resampler" and select your preferred resampling method.
      • -
      -

      You can also access the PotPlayer preferences from its main window by clicking on the gear icon or pressing F5.

      -

      -

      Step 5: Enjoy your movies

      -

      Now you are ready to enjoy your movies with ReClock and PotPlayer. To open a movie file, you can drag and drop it to PotPlayer's main window or use the Open File menu. You can also use keyboard shortcuts or mouse gestures to control the playback. You can check if ReClock is working by right-clicking on its icon in the system tray and selecting Status. You will see a window with some information such as audio format, video format, sync correction, and video clock.

      -

      We hope this article was helpful for you. If you have any questions or feedback, please leave a comment below.

      cec2833e83
      -
      -
      \ No newline at end of file diff --git a/spaces/neuroliptica/2ch_captcha/README.md b/spaces/neuroliptica/2ch_captcha/README.md deleted file mode 100644 index 292a1f36edcb62633b9fc2f063bc5d94c8b7d117..0000000000000000000000000000000000000000 --- a/spaces/neuroliptica/2ch_captcha/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: 2ch Captcha -emoji: 👁 -colorFrom: gray -colorTo: indigo -sdk: gradio -sdk_version: 3.29.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ntt123/WaveGRU-Text-To-Speech/sparse_matmul/compute/matmul_fixed_avx2.h b/spaces/ntt123/WaveGRU-Text-To-Speech/sparse_matmul/compute/matmul_fixed_avx2.h deleted file mode 100644 index 59e7d0eaa9aa576543ca428d3ad983c6ffa6b62a..0000000000000000000000000000000000000000 --- a/spaces/ntt123/WaveGRU-Text-To-Speech/sparse_matmul/compute/matmul_fixed_avx2.h +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LYRA_CODEC_SPARSE_MATMUL_COMPUTE_MATMUL_FIXED_AVX2_H_ -#define LYRA_CODEC_SPARSE_MATMUL_COMPUTE_MATMUL_FIXED_AVX2_H_ - -#include - -namespace csrblocksparse { -namespace detail { - -// Version that covers all possible combinations of the variable conditions: -// |relu|, |shift_out|, |replicas|, with int16 output. -void MatVec4x4FixedAVX2(const int16_t* weights_ptr, const int16_t* rhs, - const int32_t* bias, const int32_t* nnz_per_row, - const int16_t* rhs_indices, int start_row, int end_row, - bool relu, int shift_out, int replicas, int stride, - int16_t* output); -// Version that covers all possible combinations of the variable conditions: -// |relu|, |shift_out|, |replicas|, with int32 output. -void MatVec4x4FixedAVX2(const int16_t* weights_ptr, const int16_t* rhs, - const int32_t* bias, const int32_t* nnz_per_row, - const int16_t* rhs_indices, int start_row, int end_row, - bool relu, int shift_out, int replicas, int stride, - int32_t* output); -// Version that covers the main conditions used with 8x4: -// |relu|, |shift_out|, with int32 output. 
-void MatVec8x4FixedAVX2(const int16_t* weights_ptr, const int16_t* rhs, - const int32_t* bias, const int32_t* nnz_per_row, - const int16_t* rhs_indices, int start_row, int end_row, - bool relu, int shift_out, int32_t* output); - -} // namespace detail -} // namespace csrblocksparse - -#endif // LYRA_CODEC_SPARSE_MATMUL_COMPUTE_MATMUL_FIXED_AVX2_H_ diff --git a/spaces/oguzakif/video-object-remover/FGT_codes/FGT/parse.py b/spaces/oguzakif/video-object-remover/FGT_codes/FGT/parse.py deleted file mode 100644 index 72dfa32c0d985bb903ab074cedb7513c867d2ee8..0000000000000000000000000000000000000000 --- a/spaces/oguzakif/video-object-remover/FGT_codes/FGT/parse.py +++ /dev/null @@ -1,77 +0,0 @@ -import yaml -import os -import os.path as osp - - -def path_correction(dataInfo, workdir): - for key in dataInfo.keys(): - if 'path' in key: - dataInfo[key] = os.path.join(workdir, dataInfo[key]) - return dataInfo - - -def val_path_correction(valInfo, workdir): - for key in valInfo.keys(): - if 'root' in key: - valInfo[key] = os.path.join(workdir, valInfo[key]) - return valInfo - - -def parse(args_setting, is_train=True): - opt_path = args_setting['opt'] - print('Current working dir is: {}'.format(os.getcwd())) - print('There are {} sub directories here'.format(os.listdir(os.getcwd()))) - with open(opt_path, 'r', encoding='utf-8') as f: - opt = yaml.safe_load(f) - - opt['is_train'] = is_train - opt = {**args_setting, **opt} - - name = opt['name'] - datadir, outputdir = opt['datadir'], opt['outputdir'] - - datasets = {} - for phase, args in opt['datasets'].items(): - # phase is `train`, `val` or `test` - datasets[phase] = args - if phase == 'train': - with open(args['dataInfo_config'], 'r', encoding='utf-8') as f: - dataInfo = yaml.safe_load(f) - dataInfo = path_correction(dataInfo, datadir) - datasets['dataInfo'] = dataInfo - if phase == 'val': - with open(args['val_config'], 'r', encoding='utf-8') as f: - valInfo = yaml.safe_load(f) - valInfo = val_path_correction(valInfo, datadir) - datasets['valInfo'] = valInfo - opt['datasets'] = datasets - - # path - opt['path'] = {} - - # training settings - if is_train: - output_root = osp.join(outputdir, opt['name'], 'experiments') - opt['path']['OUTPUT_ROOT'] = output_root - opt['path']['TRAINING_STATE'] = osp.join(output_root, 'training_state') - opt['path']['LOG'] = osp.join(output_root, 'log') - opt['path']['VAL_IMAGES'] = osp.join(output_root, 'val_images') - else: # for test - result_root = osp.join(datadir, opt['path']['OUTPUT_ROOT'], 'results', opt['name']) - opt['path']['RESULT_ROOT'] = osp.join(result_root, 'RESULT_ROOT') - opt['path']['LOG'] = result_root - - return opt - - -def toString(opt, indent_l=1): - msg = '' - for k, v in opt.items(): - if isinstance(v, dict): - msg += ' ' * (indent_l * 2) + k + ':[\n' - msg += toString(v, indent_l=1) - msg += ' ' * (indent_l * 2) + ']\n' - else: - msg += ' ' * (indent_l * 2) + k + ': ' + str(v) + '\n' - return msg - diff --git a/spaces/ombhojane/Fetch-Alerts/app.py b/spaces/ombhojane/Fetch-Alerts/app.py deleted file mode 100644 index 98c44dfa014e7c014d436e3fc7bab9472efebbd5..0000000000000000000000000000000000000000 --- a/spaces/ombhojane/Fetch-Alerts/app.py +++ /dev/null @@ -1,158 +0,0 @@ -import streamlit as st -import requests -import logging -import emails -from emails.template import JinjaTemplate as T -import openai -import os - -# Define your OpenWeather API key here -openweather_api_key = "bb34b4f6362247530f4b2091d0a18a9e" - -# List of weather conditions and icons -weather_icons = { - "Clear": "☀️", - 
"Clouds": "☁️", - "Drizzle": "🌧️", - "Rain": "🌧️", - "Thunderstorm": "⛈️", - "Snow": "❄️", - "Mist": "🌫️", - "Smoke": "🌫️", - "Haze": "🌫️", - "Dust": "🌫️", - "Fog": "🌫️", - "Sand": "🌫️", - "Ash": "🌫️", - "Squall": "🌫️", - "Tornado": "🌪️" -} - -def fetch_temperature(location, api_key): - base_url = "https://api.openweathermap.org/data/2.5/weather" - params = { - "q": location, - "appid": api_key, - "units": "metric", - } - - try: - response = requests.get(base_url, params=params) - response.raise_for_status() - - data = response.json() - - if "main" in data and "temp" in data["main"]: - temperature = data["main"]["temp"] - return temperature - else: - return None - except requests.exceptions.RequestException as e: - logging.error(f"Error fetching temperature: {str(e)}") - return None - -def send_temperature_alert(user_email, location, current_temperature, threshold, alert_type): - subject = T(f"Temperature {alert_type} Alert") - html_body = T(f"

      Temperature in {location} is {alert_type} {threshold}°C.

      " - f"

      Current temperature: {current_temperature}°C

      ") - - message = emails.html(html=html_body, subject=subject, mail_from=("Temperature Alert", "alert@mycompany.com")) - - try: - response = message.send(to=(user_email,)) - if response.status_code == 250: - return True - except Exception as e: - logging.error(f"Error sending email: {str(e)}") - - return False - -def fetch_weather_condition(location, api_key): - base_url = "https://api.openweathermap.org/data/2.5/weather" - params = { - "q": location, - "appid": api_key, - "units": "metric", - } - - try: - response = requests.get(base_url, params=params) - response.raise_for_status() - - data = response.json() - - if "weather" in data and len(data["weather"]) > 0: - weather_condition = data["weather"][0]["main"] - return weather_condition - else: - return None - except requests.exceptions.RequestException as e: - logging.error(f"Error fetching weather condition: {str(e)}") - return None - -def generate_suggestions(location, current_temperature, min_temp_threshold, max_temp_threshold, weather_condition): - - openai.api_key = "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" - - if min_temp_threshold <= current_temperature <= max_temp_threshold: - prompt = f"Provide suggestions for someone in {location} where it's {current_temperature}°C and {weather_condition}." - elif current_temperature < min_temp_threshold: - prompt = f"Provide suggestions for someone in {location} where it's {current_temperature}°C and {weather_condition}. " \ - f"Recommend suitable clothing and precautions for cold weather." - else: - prompt = f"Provide suggestions for someone in {location} where it's {current_temperature}°C and {weather_condition}. " \ - f"Recommend suitable clothing and precautions for hot weather." - - response = openai.Completion.create( - engine="text-davinci-002", - prompt=prompt, - max_tokens=600, - n=1, - stop=None, - temperature=0.7, - ) - - suggestions = response.choices[0].text - return suggestions - - -def main(): - st.title("Fetch Alerts : Personalized Weather Suggestions App") - user_location = st.text_input("Enter your preferred location (e.g., Paris, France):") - user_email = st.text_input("Enter your email address:") - col1, col2 = st.columns(2) - min_temp_threshold = col1.number_input("Minimum Temp (°C)", value=27) - max_temp_threshold = col2.number_input("Maximum Temp (°C)", value=30) - check_button = st.button("Check Temperature and Weather") - - if check_button: - weather_condition = fetch_weather_condition(user_location, openweather_api_key) - - if weather_condition is not None: - st.header(f"Results of {user_location} {weather_icons[weather_condition]}") - current_temperature = fetch_temperature(user_location, openweather_api_key) - - if current_temperature is not None: - st.write(f"Current temperature: {current_temperature}°C") - - if current_temperature < min_temp_threshold: - if user_email: - if send_temperature_alert(user_email, user_location, current_temperature, min_temp_threshold, "Low"): - st.success("Low temperature email alert sent successfully!") - else: - st.error(f"Failed to send a low temperature alert to {user_email}") - elif current_temperature > max_temp_threshold: - if user_email: - if send_temperature_alert(user_email, user_location, current_temperature, max_temp_threshold, "High"): - st.success("High temperature email alert sent successfully!") - else: - st.error(f"Failed to send a high temperature alert to {user_email}") - - - suggestions = generate_suggestions(user_location, current_temperature, min_temp_threshold, max_temp_threshold, 
weather_condition) - st.subheader("Personalized Suggestions:") - st.write(suggestions) - -if __name__ == "__main__": - main() - \ No newline at end of file diff --git a/spaces/onnx/EfficientNet-Lite4/README.md b/spaces/onnx/EfficientNet-Lite4/README.md deleted file mode 100644 index 3a24cee20d3e11828a7b3ca304ee562ffd07c4bf..0000000000000000000000000000000000000000 --- a/spaces/onnx/EfficientNet-Lite4/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: EfficientNet Lite4 -emoji: 🦀 -colorFrom: blue -colorTo: indigo -sdk: gradio -sdk_version: 2.8.8 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/opencompass/MMBench/templates/index.html b/spaces/opencompass/MMBench/templates/index.html deleted file mode 100644 index c2dfac7ec9b63ce788a59a37ab189017940f8411..0000000000000000000000000000000000000000 --- a/spaces/opencompass/MMBench/templates/index.html +++ /dev/null @@ -1,35 +0,0 @@ - - - - - - - My static Space - - - - -
      - -
      - - - \ No newline at end of file diff --git a/spaces/opetrova/face-frontalization/README.md b/spaces/opetrova/face-frontalization/README.md deleted file mode 100644 index a8b78b52d08d432863e7d3c7d6ece07cb5adfdc9..0000000000000000000000000000000000000000 --- a/spaces/opetrova/face-frontalization/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Face Frontalization -emoji: ⚡ -colorFrom: blue -colorTo: yellow -sdk: gradio -sdk_version: 2.8.10 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/osanseviero/test_gradio/README.md b/spaces/osanseviero/test_gradio/README.md deleted file mode 100644 index 4e58ccfcbff091b02c388ecf928d5f6406763b85..0000000000000000000000000000000000000000 --- a/spaces/osanseviero/test_gradio/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Test_gradio -emoji: ⚡ -colorFrom: yellow -colorTo: purple -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/api/pipelines/stable_diffusion/image_variation.md b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/api/pipelines/stable_diffusion/image_variation.md deleted file mode 100644 index 4895ababf5bd19fdd02578647ecec6f4885423f5..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/api/pipelines/stable_diffusion/image_variation.md +++ /dev/null @@ -1,37 +0,0 @@ - - -# Image variation - -The Stable Diffusion model can also generate variations from an input image. It uses a fine-tuned version of a Stable Diffusion model by [Justin Pinkney](https://www.justinpinkney.com/) from [Lambda](https://lambdalabs.com/). - -The original codebase can be found at [LambdaLabsML/lambda-diffusers](https://github.com/LambdaLabsML/lambda-diffusers#stable-diffusion-image-variations) and additional official checkpoints for image variation can be found at [lambdalabs/sd-image-variations-diffusers](https://huggingface.co/lambdalabs/sd-image-variations-diffusers). - - - -Make sure to check out the Stable Diffusion [Tips](./overview#tips) section to learn how to explore the tradeoff between scheduler speed and quality, and how to reuse pipeline components efficiently! 
- - - -## StableDiffusionImageVariationPipeline - -[[autodoc]] StableDiffusionImageVariationPipeline - - all - - __call__ - - enable_attention_slicing - - disable_attention_slicing - - enable_xformers_memory_efficient_attention - - disable_xformers_memory_efficient_attention - -## StableDiffusionPipelineOutput - -[[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/using-diffusers/schedulers.md b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/using-diffusers/schedulers.md deleted file mode 100644 index c791b47b783270cbdaf70f8407aff6309838bc92..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/docs/source/en/using-diffusers/schedulers.md +++ /dev/null @@ -1,315 +0,0 @@ - - -# Schedulers - -[[open-in-colab]] - -Diffusion pipelines are inherently a collection of diffusion models and schedulers that are partly independent from each other. This means that one is able to switch out parts of the pipeline to better customize -a pipeline to one's use case. The best example of this is the [Schedulers](../api/schedulers/overview.md). - -Whereas diffusion models usually simply define the forward pass from noise to a less noisy sample, -schedulers define the whole denoising process, *i.e.*: -- How many denoising steps? -- Stochastic or deterministic? -- What algorithm to use to find the denoised sample - -They can be quite complex and often define a trade-off between **denoising speed** and **denoising quality**. -It is extremely difficult to measure quantitatively which scheduler works best for a given diffusion pipeline, so it is often recommended to simply try out which works best. - -The following paragraphs show how to do so with the 🧨 Diffusers library. - -## Load pipeline - -Let's start by loading the [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) model in the [`DiffusionPipeline`]: - -```python -from huggingface_hub import login -from diffusers import DiffusionPipeline -import torch - -login() - -pipeline = DiffusionPipeline.from_pretrained( - "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True -) -``` - -Next, we move it to GPU: - -```python -pipeline.to("cuda") -``` - -## Access the scheduler - -The scheduler is always one of the components of the pipeline and is usually called `"scheduler"`. -So it can be accessed via the `"scheduler"` property. - -```python -pipeline.scheduler -``` - -**Output**: -``` -PNDMScheduler { - "_class_name": "PNDMScheduler", - "_diffusers_version": "0.8.0.dev0", - "beta_end": 0.012, - "beta_schedule": "scaled_linear", - "beta_start": 0.00085, - "clip_sample": false, - "num_train_timesteps": 1000, - "set_alpha_to_one": false, - "skip_prk_steps": true, - "steps_offset": 1, - "trained_betas": null -} -``` - -We can see that the scheduler is of type [`PNDMScheduler`]. -Cool, now let's compare the scheduler in its performance to other schedulers. -First we define a prompt on which we will test all the different schedulers: - -```python -prompt = "A photograph of an astronaut riding a horse on Mars, high resolution, high definition." -``` - -Next, we create a generator from a random seed that will ensure that we can generate similar images as well as run the pipeline: - -```python -generator = torch.Generator(device="cuda").manual_seed(8) -image = pipeline(prompt, generator=generator).images[0] -image -``` - -

- <!-- sample image: output of the run above with the default PNDMScheduler -->

      - - -## Changing the scheduler - -Now we show how easy it is to change the scheduler of a pipeline. Every scheduler has a property [`SchedulerMixin.compatibles`] -which defines all compatible schedulers. You can take a look at all available, compatible schedulers for the Stable Diffusion pipeline as follows. - -```python -pipeline.scheduler.compatibles -``` - -**Output**: -``` -[diffusers.schedulers.scheduling_lms_discrete.LMSDiscreteScheduler, - diffusers.schedulers.scheduling_ddim.DDIMScheduler, - diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler, - diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler, - diffusers.schedulers.scheduling_pndm.PNDMScheduler, - diffusers.schedulers.scheduling_ddpm.DDPMScheduler, - diffusers.schedulers.scheduling_euler_ancestral_discrete.EulerAncestralDiscreteScheduler] -``` - -Cool, lots of schedulers to look at. Feel free to have a look at their respective class definitions: - -- [`LMSDiscreteScheduler`], -- [`DDIMScheduler`], -- [`DPMSolverMultistepScheduler`], -- [`EulerDiscreteScheduler`], -- [`PNDMScheduler`], -- [`DDPMScheduler`], -- [`EulerAncestralDiscreteScheduler`]. - -We will now compare the input prompt with all other schedulers. To change the scheduler of the pipeline you can make use of the -convenient [`ConfigMixin.config`] property in combination with the [`ConfigMixin.from_config`] function. - -```python -pipeline.scheduler.config -``` - -returns a dictionary of the configuration of the scheduler: - -**Output**: -``` -FrozenDict([('num_train_timesteps', 1000), - ('beta_start', 0.00085), - ('beta_end', 0.012), - ('beta_schedule', 'scaled_linear'), - ('trained_betas', None), - ('skip_prk_steps', True), - ('set_alpha_to_one', False), - ('steps_offset', 1), - ('_class_name', 'PNDMScheduler'), - ('_diffusers_version', '0.8.0.dev0'), - ('clip_sample', False)]) -``` - -This configuration can then be used to instantiate a scheduler -of a different class that is compatible with the pipeline. Here, -we change the scheduler to the [`DDIMScheduler`]. - -```python -from diffusers import DDIMScheduler - -pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config) -``` - -Cool, now we can run the pipeline again to compare the generation quality. - -```python -generator = torch.Generator(device="cuda").manual_seed(8) -image = pipeline(prompt, generator=generator).images[0] -image -``` - -

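If you want to confirm the swap actually took effect before generating again, you can inspect the scheduler attribute directly. A minimal sanity check, assuming the `pipeline` set up above (the printed value comes from the config shown earlier):

```python
from diffusers import DDIMScheduler

# The pipeline now holds a DDIMScheduler that inherited the shared settings
# (beta schedule, number of train timesteps, ...) from the previous PNDM config.
assert isinstance(pipeline.scheduler, DDIMScheduler)
print(pipeline.scheduler.config["beta_schedule"])  # "scaled_linear"
```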
      - -If you are a JAX/Flax user, please check [this section](#changing-the-scheduler-in-flax) instead. - -## Compare schedulers - -So far we have tried running the stable diffusion pipeline with two schedulers: [`PNDMScheduler`] and [`DDIMScheduler`]. -A number of better schedulers have been released that can be run with much fewer steps, let's compare them here: - -[`LMSDiscreteScheduler`] usually leads to better results: - -```python -from diffusers import LMSDiscreteScheduler - -pipeline.scheduler = LMSDiscreteScheduler.from_config(pipeline.scheduler.config) - -generator = torch.Generator(device="cuda").manual_seed(8) -image = pipeline(prompt, generator=generator).images[0] -image -``` - -

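Because the interesting trade-off is speed versus quality, it can also be worth timing each run. Here is a rough wall-clock sketch using only the standard library (not part of the original example; synchronize the GPU so the measurement isn't skewed by queued work):

```python
import time

import torch

torch.cuda.synchronize()  # flush pending GPU work before starting the clock
start = time.perf_counter()

generator = torch.Generator(device="cuda").manual_seed(8)
image = pipeline(prompt, generator=generator).images[0]

torch.cuda.synchronize()  # wait for the generation to actually finish
print(f"{pipeline.scheduler.__class__.__name__}: {time.perf_counter() - start:.1f}s")
```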
      - - -[`EulerDiscreteScheduler`] and [`EulerAncestralDiscreteScheduler`] can generate high quality results with as little as 30 steps. - -```python -from diffusers import EulerDiscreteScheduler - -pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config) - -generator = torch.Generator(device="cuda").manual_seed(8) -image = pipeline(prompt, generator=generator, num_inference_steps=30).images[0] -image -``` - -

      - - -and: - -```python -from diffusers import EulerAncestralDiscreteScheduler - -pipeline.scheduler = EulerAncestralDiscreteScheduler.from_config(pipeline.scheduler.config) - -generator = torch.Generator(device="cuda").manual_seed(8) -image = pipeline(prompt, generator=generator, num_inference_steps=30).images[0] -image -``` - -

      - - -At the time of writing this doc [`DPMSolverMultistepScheduler`] gives arguably the best speed/quality trade-off and can be run with as little -as 20 steps. - -```python -from diffusers import DPMSolverMultistepScheduler - -pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config) - -generator = torch.Generator(device="cuda").manual_seed(8) -image = pipeline(prompt, generator=generator, num_inference_steps=20).images[0] -image -``` - -

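To run this kind of comparison in one pass, you can loop over several compatible scheduler classes and regenerate the same seeded prompt with each one. The sketch below builds on the `pipeline` and `prompt` defined above; the step counts and output filenames are illustrative, not prescriptive:

```python
import torch
from diffusers import (
    PNDMScheduler,
    DDIMScheduler,
    LMSDiscreteScheduler,
    EulerDiscreteScheduler,
    EulerAncestralDiscreteScheduler,
    DPMSolverMultistepScheduler,
)

runs = [
    (PNDMScheduler, 50),
    (DDIMScheduler, 50),
    (LMSDiscreteScheduler, 50),
    (EulerDiscreteScheduler, 30),
    (EulerAncestralDiscreteScheduler, 30),
    (DPMSolverMultistepScheduler, 20),
]

for scheduler_cls, steps in runs:
    pipeline.scheduler = scheduler_cls.from_config(pipeline.scheduler.config)
    # Re-seed before every run so each scheduler starts from the same noise.
    generator = torch.Generator(device="cuda").manual_seed(8)
    image = pipeline(prompt, generator=generator, num_inference_steps=steps).images[0]
    # The pipeline returns PIL images, so they can be written straight to disk.
    image.save(f"astronaut_{scheduler_cls.__name__}_{steps}steps.png")
```

Saving every result to disk makes the side-by-side comparison discussed next much easier.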
      - -As you can see most images look very similar and are arguably of very similar quality. It often really depends on the specific use case which scheduler to choose. A good approach is always to run multiple different -schedulers to compare results. - -## Changing the Scheduler in Flax - -If you are a JAX/Flax user, you can also change the default pipeline scheduler. This is a complete example of how to run inference using the Flax Stable Diffusion pipeline and the super-fast [DDPM-Solver++ scheduler](../api/schedulers/multistep_dpm_solver): - -```Python -import jax -import numpy as np -from flax.jax_utils import replicate -from flax.training.common_utils import shard - -from diffusers import FlaxStableDiffusionPipeline, FlaxDPMSolverMultistepScheduler - -model_id = "runwayml/stable-diffusion-v1-5" -scheduler, scheduler_state = FlaxDPMSolverMultistepScheduler.from_pretrained( - model_id, - subfolder="scheduler" -) -pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( - model_id, - scheduler=scheduler, - revision="bf16", - dtype=jax.numpy.bfloat16, -) -params["scheduler"] = scheduler_state - -# Generate 1 image per parallel device (8 on TPUv2-8 or TPUv3-8) -prompt = "a photo of an astronaut riding a horse on mars" -num_samples = jax.device_count() -prompt_ids = pipeline.prepare_inputs([prompt] * num_samples) - -prng_seed = jax.random.PRNGKey(0) -num_inference_steps = 25 - -# shard inputs and rng -params = replicate(params) -prng_seed = jax.random.split(prng_seed, jax.device_count()) -prompt_ids = shard(prompt_ids) - -images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images -images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:]))) -``` - - - -The following Flax schedulers are _not yet compatible_ with the Flax Stable Diffusion Pipeline: - -- `FlaxLMSDiscreteScheduler` -- `FlaxDDPMScheduler` - - diff --git a/spaces/pamixsun/glaucoma_screening/app.py b/spaces/pamixsun/glaucoma_screening/app.py deleted file mode 100644 index a5ecb83b555264502fd2eeb7f48136ba4a2670b5..0000000000000000000000000000000000000000 --- a/spaces/pamixsun/glaucoma_screening/app.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright (C) 2023, Xu Sun. - -# This program is licensed under the Apache License version 2. -# See LICENSE or go to for full license details. - -import torch -import numpy as np - -import matplotlib.pyplot as plt -import streamlit as st - -from PIL import Image -from glaucoma import GlaucomaModel - -run_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - - -def main(): - # Wide mode - st.set_page_config(layout="wide") - - # Designing the interface - st.title("Glaucoma Screening from Retinal Fundus Images") - # For newline - st.write('\n') - # Author info - st.write('Developed by X. Sun. 
Find more info about me: https://pamixsun.github.io') - # For newline - st.write('\n') - # Instructions - st.markdown("*Hint: click on the top-right corner of an image to enlarge it!*") - # Set the columns - cols = st.beta_columns((1, 1, 1)) - cols[0].subheader("Input image") - cols[1].subheader("Optic disc and optic cup") - cols[2].subheader("Class activation map") - - # set the visualization figure - fig, ax = plt.subplots() - - # Sidebar - # File selection - st.sidebar.title("Image selection") - # Disabling warning - st.set_option('deprecation.showfileUploaderEncoding', False) - # Choose your own image - uploaded_file = st.sidebar.file_uploader("Upload image", type=['png', 'jpeg', 'jpg']) - if uploaded_file is not None: - # read the upload image - image = Image.open(uploaded_file).convert('RGB') - image = np.array(image).astype(np.uint8) - # page_idx = 0 - ax.imshow(image) - ax.axis('off') - cols[0].pyplot(fig) - - # For newline - st.sidebar.write('\n') - - # actions - if st.sidebar.button("Analyze image"): - - if uploaded_file is None: - st.sidebar.write("Please upload an image") - - else: - with st.spinner('Loading model...'): - # load model - model = GlaucomaModel(device=run_device) - - with st.spinner('Analyzing...'): - # Forward the image to the model and get results - disease_idx, disc_cup_image, cam, vcdr = model.process(image) - - # plot the optic disc and optic cup image - ax.imshow(disc_cup_image) - ax.axis('off') - cols[1].pyplot(fig) - - # plot the stitched image - ax.imshow(cam) - ax.axis('off') - cols[2].pyplot(fig) - - # Display JSON - st.subheader(" Screening results:") - st.write('\n') - - final_results_as_table = f""" - |Parameters|Outcomes| - |---|---| - |Vertical cup-to-disc ratio|{vcdr:.04f}| - |Category|{model.cls_id2label[disease_idx]}| - """ - st.markdown(final_results_as_table) - - -if __name__ == '__main__': - main() \ No newline at end of file diff --git a/spaces/patimus-prime/strain_selection/README.md b/spaces/patimus-prime/strain_selection/README.md deleted file mode 100644 index 67c5921fffbba94b25ff054c6700b72a84e01daf..0000000000000000000000000000000000000000 --- a/spaces/patimus-prime/strain_selection/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Strain Selection -emoji: 😻 -colorFrom: yellow -colorTo: gray -sdk: streamlit -sdk_version: 1.21.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/petermutwiri/Movie_Review_Application/functions.py b/spaces/petermutwiri/Movie_Review_Application/functions.py deleted file mode 100644 index 6f6113a600276e017842b9b4851442dcf71dd6c9..0000000000000000000000000000000000000000 --- a/spaces/petermutwiri/Movie_Review_Application/functions.py +++ /dev/null @@ -1,30 +0,0 @@ -from transformers import AutoTokenizer, AutoConfig, AutoModelForSequenceClassification -from scipy.special import softmax -# Define the preprocess function -def preprocess(text): - new_text = [] - for t in text.split(" "): - t = '@user' if t.startswith('@') and len(t) > 1 else t - t = 'http' if t.startswith('http') else t - new_text.append(t) - return " ".join(new_text) - -# Define the sentiment_analysis function -def sentiment_analysis(text, tokenizer, model): - text = preprocess(text) - encoded_input = tokenizer(text, return_tensors='pt') - output = model(**encoded_input) - scores_ = output[0][0].detach().numpy() - scores_ = softmax(scores_) - labels = ['Negative', 'Positive'] - scores = {l: float(s) for (l, s) in 
zip(labels, scores_)} - return scores - -# Define the map_sentiment_score_to_rating function -def map_sentiment_score_to_rating(score): - min_score = 0.0 - max_score = 1.0 - min_rating = 1 - max_rating = 10 - rating = ((score - min_score) / (max_score - min_score)) * (max_rating - min_rating) + min_rating - return rating \ No newline at end of file diff --git a/spaces/pierreguillou/question-answering-portuguese/README.md b/spaces/pierreguillou/question-answering-portuguese/README.md deleted file mode 100644 index 970adaf40984027cfbc34f2a4ad1763f480f865c..0000000000000000000000000000000000000000 --- a/spaces/pierreguillou/question-answering-portuguese/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Question Answering Portuguese -emoji: 🌍 -colorFrom: indigo -colorTo: indigo -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/plzdontcry/dakubettergpt/src/components/Menu/NewChat.tsx b/spaces/plzdontcry/dakubettergpt/src/components/Menu/NewChat.tsx deleted file mode 100644 index 22e658798e4db6d59eb15330e3b6518657a8f42f..0000000000000000000000000000000000000000 --- a/spaces/plzdontcry/dakubettergpt/src/components/Menu/NewChat.tsx +++ /dev/null @@ -1,42 +0,0 @@ -import React from 'react'; -import { useTranslation } from 'react-i18next'; -import useStore from '@store/store'; - -import PlusIcon from '@icon/PlusIcon'; - -import useAddChat from '@hooks/useAddChat'; - -const NewChat = ({ folder }: { folder?: string }) => { - const { t } = useTranslation(); - const addChat = useAddChat(); - const generating = useStore((state) => state.generating); - - return ( - { - if (!generating) addChat(folder); - }} - title={folder ? String(t('newChat')) : ''} - > - {folder ? ( -
      - {t('newChat')} -
      - ) : ( - <> - - {t('newChat')} - - )} -
      - ); -}; - -export default NewChat; diff --git a/spaces/pngwn/nextjs/out/_next/static/chunks/main-bb4186ea0c56aaf2.js b/spaces/pngwn/nextjs/out/_next/static/chunks/main-bb4186ea0c56aaf2.js deleted file mode 100644 index 5ce4e5cd20138fb10e544cf7718c69a390f15307..0000000000000000000000000000000000000000 --- a/spaces/pngwn/nextjs/out/_next/static/chunks/main-bb4186ea0c56aaf2.js +++ /dev/null @@ -1 +0,0 @@ -(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[179],{6086:function(e){"use strict";var t=Object.assign.bind(Object);e.exports=t,e.exports.default=e.exports},37:function(){"trimStart"in String.prototype||(String.prototype.trimStart=String.prototype.trimLeft),"trimEnd"in String.prototype||(String.prototype.trimEnd=String.prototype.trimRight),"description"in Symbol.prototype||Object.defineProperty(Symbol.prototype,"description",{configurable:!0,get:function(){var e=/\((.*)\)/.exec(this.toString());return e?e[1]:void 0}}),Array.prototype.flat||(Array.prototype.flat=function(e,t){return t=this.concat.apply([],this),e>1&&t.some(Array.isArray)?t.flat(e-1):t},Array.prototype.flatMap=function(e,t){return this.map(e,t).flat()}),Promise.prototype.finally||(Promise.prototype.finally=function(e){if("function"!=typeof e)return this.then(e,e);var t=this.constructor||Promise;return this.then((function(r){return t.resolve(e()).then((function(){return r}))}),(function(r){return t.resolve(e()).then((function(){throw r}))}))})},6007:function(e,t){"use strict";function r(e,t){return null!=t&&"undefined"!==typeof Symbol&&t[Symbol.hasInstance]?t[Symbol.hasInstance](e):e instanceof t}Object.defineProperty(t,"__esModule",{value:!0}),t.isEqualNode=a,t.default=function(){var e=null;return{mountedInstances:new Set,updateHead:function(t){var r=e=Promise.resolve().then((function(){if(r===e){e=null;var n={};t.forEach((function(e){if("link"===e.type&&e.props["data-optimized-fonts"]){if(document.querySelector('style[data-href="'.concat(e.props["data-href"],'"]')))return;e.props.href=e.props["data-href"],e.props["data-href"]=void 0}var t=n[e.type]||[];t.push(e),n[e.type]=t}));var i=n.title?n.title[0]:null,u="";if(i){var c=i.props.children;u="string"===typeof c?c:Array.isArray(c)?c.join(""):""}u!==document.title&&(document.title=u),["meta","base","link","style","script"].forEach((function(e){!function(e,t){var r=document.getElementsByTagName("head")[0],n=r.querySelector("meta[name=next-head-count]");0;for(var i=Number(n.content),u=[],c=0,s=n.previousElementSibling;ce.length)&&(t=e.length);for(var r=0,n=new Array(t);r0&&void 0!==l[0]?l[0]:{},r=q,e.prev=3,e.next=6,te.routeLoader.whenEntrypoint("/_app");case 6:if(!("error"in(n=e.sent))){e.next=9;break}throw n.error;case 9:a=n.component,i=n.exports,ae=a,u=i&&i.reportWebVitals,ie=function(e){var t,r=e.id,n=e.name,o=e.startTime,a=e.value,i=e.duration,c=e.entryType,s=e.entries,l="".concat(Date.now(),"-").concat(Math.floor(8999999999999*Math.random())+1e12);s&&s.length&&(t=s[0].startTime);var f={id:r||l,name:n,startTime:o||t,value:null==a?i:a,label:"mark"===c||"measure"===c?"custom":"web-vital"};null===u||void 0===u||u(f),C.trackWebVitalMetric(f)},e.next=17;break;case 17:return e.next=19,te.routeLoader.whenEntrypoint(H);case 19:e.t0=e.sent;case 20:if(!("error"in(c=e.t0))){e.next=23;break}throw c.error;case 23:se=c.component,e.next=28;break;case 28:e.next=33;break;case 30:e.prev=30,e.t1=e.catch(3),r=k.getProperError(e.t1);case 33:if(!window.__NEXT_PRELOADREADY){e.next=37;break}return e.next=37,window.__NEXT_PRELOADREADY($);case 37:return 
t.router=oe=A.createRouter(H,W,ee,{initialProps:U,pageLoader:te,App:ae,Component:se,wrapApp:Se,err:r,isFallback:Boolean(V),subscription:function(e,t,r){return he(Object.assign({},e,{App:t,scroll:r}))},locale:X,locales:Y,defaultLocale:J,domainLocales:K,isPreview:Q}),he(s={App:ae,initial:!0,Component:se,props:U,err:r}),e.abrupt("return",fe);case 44:return e.abrupt("return",{emitter:fe,renderCtx:s});case 45:case"end":return e.stop()}}),e,null,[[3,30]])})))).apply(this,arguments)}function de(){return(de=L(o.default.mark((function e(t){var r;return o.default.wrap((function(e){for(;;)switch(e.prev=e.next){case 0:if(!t.err){e.next=4;break}return e.next=3,ye(t);case 3:return e.abrupt("return");case 4:return e.prev=4,e.next=7,xe(t);case 7:e.next=17;break;case 9:if(e.prev=9,e.t0=e.catch(4),!(r=k.getProperError(e.t0)).cancelled){e.next=14;break}throw r;case 14:return e.next=17,ye(N({},t,{err:r}));case 17:case"end":return e.stop()}}),e,null,[[4,9]])})))).apply(this,arguments)}function he(e){return de.apply(this,arguments)}function ye(e){var t=e.App,n=e.err;return console.error(n),console.error("A client-side exception has occurred, see here for more info: https://nextjs.org/docs/messages/client-side-exception-occurred"),te.loadPage("/_error").then((function(e){var t=e.page,n=e.styleSheets;return(null===Pe||void 0===Pe?void 0:Pe.Component)===t?Promise.resolve().then((function(){return function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var r in e)if(Object.prototype.hasOwnProperty.call(e,r)){var n=Object.defineProperty&&Object.getOwnPropertyDescriptor?Object.getOwnPropertyDescriptor(e,r):{};n.get||n.set?Object.defineProperty(t,r,n):t[r]=e[r]}return t.default=e,t}(r(9185))})).then((function(e){return{ErrorComponent:e.default,styleSheets:[]}})):{ErrorComponent:t,styleSheets:n}})).then((function(r){var o=r.ErrorComponent,a=r.styleSheets,i=Se(t),u={Component:o,AppTree:i,router:oe,ctx:{err:n,pathname:H,query:W,asPath:ee,AppTree:i}};return Promise.resolve(e.props?e.props:S.loadGetInitialProps(t,u)).then((function(t){return xe(N({},e,{err:n,Component:o,styleSheets:a,props:t}))}))}))}t.emitter=fe;var ve=!0;function me(){S.ST&&(performance.mark("afterHydrate"),performance.measure("Next.js-before-hydration","navigationStart","beforeRender"),performance.measure("Next.js-hydration","beforeRender","afterHydrate"),ie&&performance.getEntriesByName("Next.js-hydration").forEach(ie),be())}function ge(){if(S.ST){performance.mark("afterRender");var e=performance.getEntriesByName("routeChange","mark");e.length&&(performance.measure("Next.js-route-change-to-render",e[0].name,"beforeRender"),performance.measure("Next.js-render","beforeRender","afterRender"),ie&&(performance.getEntriesByName("Next.js-render").forEach(ie),performance.getEntriesByName("Next.js-route-change-to-render").forEach(ie)),be(),["Next.js-route-change-to-render","Next.js-render"].forEach((function(e){return performance.clearMeasures(e)})))}}function be(){["beforeRender","afterHydrate","afterRender","routeChange"].forEach((function(e){return performance.clearMarks(e)}))}function we(e){var t=e.children;return h.default.createElement(le,{fn:function(e){return ye({App:ae,err:e}).catch((function(e){return console.error("Error rendering page: ",e)}))}},h.default.createElement(g.RouterContext.Provider,{value:A.makePublicRouterInstance(oe)},h.default.createElement(v.HeadManagerContext.Provider,{value:ue},t)))}function _e(e,t){return h.default.createElement(e,Object.assign({},t))}var Pe,Se=function(e){return function(t){var 
r=N({},t,{Component:se,err:q,router:oe});return h.default.createElement(we,null,_e(e,r))}};function xe(e){var t=function(){s()},r=e.App,n=e.Component,o=e.props,a=e.err,i=e.__N_RSC,u="initial"in e?void 0:e.styleSheets;n=n||Pe.Component;var c=N({},o=o||Pe.props,{Component:!!i?undefined:n,err:a,router:oe});Pe=c;var s,l=!1,f=new Promise((function(e,t){ne&&ne(),s=function(){ne=null,e()},ne=function(){l=!0,ne=null;var e=new Error("Cancel rendering route");e.cancelled=!0,t(e)}}));!function(){if(!u)return!1;var e=F(document.querySelectorAll("style[data-n-href]")),t=new Set(e.map((function(e){return e.getAttribute("data-n-href")}))),r=document.querySelector("noscript[data-n-css]"),n=null===r||void 0===r?void 0:r.getAttribute("data-n-css");u.forEach((function(e){var r=e.href,o=e.text;if(!t.has(r)){var a=document.createElement("style");a.setAttribute("data-n-href",r),a.setAttribute("media","x"),n&&a.setAttribute("nonce",n),document.head.appendChild(a),a.appendChild(document.createTextNode(o))}}))}();var p=h.default.createElement(h.default.Fragment,null,h.default.createElement(Oe,{callback:function(){if(u&&!l){for(var t=new Set(u.map((function(e){return e.href}))),r=F(document.querySelectorAll("style[data-n-href]")),n=r.map((function(e){return e.getAttribute("data-n-href")})),o=0;oe.length)&&(t=e.length);for(var r=0,n=new Array(t);re.length)&&(t=e.length);for(var r=0,n=new Array(t);re.length)&&(t=e.length);for(var r=0,n=new Array(t);re.length)&&(t=e.length);for(var r=0,n=new Array(t);r=0||(o[r]=e[r]);return o}(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(o[r]=e[r])}return o}var p=new Map,d=new Set,h=["onLoad","dangerouslySetInnerHTML","children","onError","strategy"],y=function(e){var t=e.src,r=e.id,n=e.onLoad,a=void 0===n?function(){}:n,i=e.dangerouslySetInnerHTML,c=e.children,s=void 0===c?"":c,l=e.strategy,f=void 0===l?"afterInteractive":l,y=e.onError,v=r||t;if(!v||!d.has(v)){if(p.has(t))return d.add(v),void p.get(t).then(a,y);var m=document.createElement("script"),g=new Promise((function(e,t){m.addEventListener("load",(function(t){e(),a&&a.call(this,t)})),m.addEventListener("error",(function(e){t(e)}))})).catch((function(e){y&&y(e)}));t&&p.set(t,g),d.add(v),i?m.innerHTML=i.__html||"":s?m.textContent="string"===typeof s?s:Array.isArray(s)?s.join(""):"":t&&(m.src=t);var b=!0,w=!1,_=void 0;try{for(var P,S=Object.entries(e)[Symbol.iterator]();!(b=(P=S.next()).done);b=!0){var x=o(P.value,2),E=x[0],O=x[1];if(void 0!==O&&!h.includes(E)){var j=u.DOMAttributeNames[E]||E.toLowerCase();m.setAttribute(j,O)}}}catch(R){w=!0,_=R}finally{try{b||null==S.return||S.return()}finally{if(w)throw _}}m.setAttribute("data-nscript",f),document.body.appendChild(m)}};function v(e){var t=e.strategy,r=void 0===t?"afterInteractive":t;"afterInteractive"===r?y(e):"lazyOnload"===r&&window.addEventListener("load",(function(){c.requestIdleCallback((function(){return y(e)}))}))}},7813:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.trackWebVitalMetric=function(e){a.push(e),o.forEach((function(t){return t(e)}))},t.useWebVitalsReport=function(e){var t=n.useRef(0);n.useEffect((function(){for(var r=function(r){e(r),t.current=a.length},n=t.current;n0&&void 0!==arguments[0]?arguments[0]:{},t=e.ampFirst,r=void 0!==t&&t,n=e.hybrid,o=void 0!==n&&n,a=e.hasQuery,i=void 0!==a&&a;return r||o&&i}},8404:function(e,t,r){"use strict";var n;Object.defineProperty(t,"__esModule",{value:!0}),t.HeadManagerContext=void 0;var 
o=((n=r(7294))&&n.__esModule?n:{default:n}).default.createContext({});t.HeadManagerContext=o},5443:function(e,t,r){"use strict";function n(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}Object.defineProperty(t,"__esModule",{value:!0}),t.defaultHead=l,t.default=void 0;var o,a=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var r in e)if(Object.prototype.hasOwnProperty.call(e,r)){var n=Object.defineProperty&&Object.getOwnPropertyDescriptor?Object.getOwnPropertyDescriptor(e,r):{};n.get||n.set?Object.defineProperty(t,r,n):t[r]=e[r]}return t.default=e,t}(r(7294)),i=(o=r(5188))&&o.__esModule?o:{default:o},u=r(2227),c=r(8404),s=r(3240);function l(){var e=arguments.length>0&&void 0!==arguments[0]&&arguments[0],t=[a.default.createElement("meta",{charSet:"utf-8"})];return e||t.push(a.default.createElement("meta",{name:"viewport",content:"width=device-width"})),t}function f(e,t){return"string"===typeof t||"number"===typeof t?e:t.type===a.default.Fragment?e.concat(a.default.Children.toArray(t.props.children).reduce((function(e,t){return"string"===typeof t||"number"===typeof t?e:e.concat(t)}),[])):e.concat(t)}var p=["name","httpEquiv","charSet","itemProp"];function d(e,t){return e.reduce((function(e,t){var r=a.default.Children.toArray(t.props.children);return e.concat(r)}),[]).reduce(f,[]).reverse().concat(l(t.inAmpMode)).filter(function(){var e=new Set,t=new Set,r=new Set,n={};return function(o){var a=!0,i=!1;if(o.key&&"number"!==typeof o.key&&o.key.indexOf("$")>0){i=!0;var u=o.key.slice(o.key.indexOf("$")+1);e.has(u)?a=!1:e.add(u)}switch(o.type){case"title":case"base":t.has(o.type)?a=!1:t.add(o.type);break;case"meta":for(var c=0,s=p.length;ce.length)&&(t=e.length);for(var r=0,n=new Array(t);r>>0,1)},emit:function(t){for(var r=arguments.length,o=new Array(r>1?r-1:0),a=1;ae.length)&&(t=e.length);for(var r=0,n=new Array(t);r-1||r>-1)&&(e=e.substring(0,t>-1?t:r)),e}function M(e){return(e=C(e))===j||e.startsWith(j+"/")}function L(e){return function(e,t){if(!e.startsWith("/")||!t)return e;var r=C(e);return p.normalizePathTrailingSlash("".concat(t).concat(r))+e.substr(r.length)}(e,j)}function T(e){return(e=e.slice(j.length)).startsWith("/")||(e="/".concat(e)),e}function I(e){if(e.startsWith("/")||e.startsWith("#")||e.startsWith("?"))return!0;try{var t=g.getLocationOrigin(),r=new URL(e,t);return r.origin===t&&M(r.pathname)}catch(n){return!1}}function N(e,t,r){var n="",o=x.getRouteRegex(e),a=o.groups,i=(t!==e?S.getRouteMatcher(o)(t):"")||r;n=e;var u=Object.keys(a);return u.every((function(e){var t=i[e]||"",r=a[e],o=r.repeat,u=r.optional,c="[".concat(o?"...":"").concat(e,"]");return u&&(c="".concat(t?"":"/","[").concat(c,"]")),o&&!Array.isArray(t)&&(t=[t]),(u||e in i)&&(n=n.replace(c,o?t.map((function(e){return encodeURIComponent(e)})).join("/"):encodeURIComponent(t))||"/")}))||(n=""),{params:u,result:n}}function D(e,t){var r={};return Object.keys(e).forEach((function(n){t.includes(n)||(r[n]=e[n])})),r}function F(e,t,r){var n,o="string"===typeof t?t:g.formatWithValidation(t),a=o.match(/^[a-zA-Z]{1,}:\/\//),i=a?o.substr(a[0].length):o;if((i.split("?")[0]||"").match(/(\/\/|\\)/)){console.error("Invalid href passed to next/router: ".concat(o,", repeated forward-slashes (//) or backslashes \\ are not valid in the href"));var u=g.normalizeRepeatedSlashes(i);o=(a?a[0]:"")+u}if(!I(o))return r?[o]:o;try{n=new URL(o.startsWith("#")?e.asPath:e.pathname,"http://n")}catch(v){n=new URL("/","http://n")}try{var c=new 
URL(o,n);c.pathname=p.normalizePathTrailingSlash(c.pathname);var s="";if(b.isDynamicRoute(c.pathname)&&c.searchParams&&r){var l=_.searchParamsToUrlQuery(c.searchParams),f=N(c.pathname,c.pathname,l),d=f.result,h=f.params;d&&(s=g.formatWithValidation({pathname:d,hash:c.hash,query:D(l,h)}))}var y=c.origin===n.origin?c.href.slice(c.origin.length):c.href;return r?[y,s||y]:y}catch(m){return r?[o]:o}}function U(e){var t=g.getLocationOrigin();return e.startsWith(t)?e.substring(t.length):e}function q(e,t,r){var n=f(F(e,t,!0),2),o=n[0],a=n[1],i=g.getLocationOrigin(),u=o.startsWith(i),c=a&&a.startsWith(i);o=U(o),a=a?U(a):a;var s=u?o:L(o),l=r?U(F(e,r)):a||o;return{url:s,as:c?l:L(l)}}function H(e,t){var r=p.removePathTrailingSlash(y.denormalizePagePath(e));return"/404"===r||"/_error"===r?e:(t.includes(r)||t.some((function(t){if(b.isDynamicRoute(t)&&x.getRouteRegex(t).re.test(r))return e=t,!0})),p.removePathTrailingSlash(e))}var W=Symbol("SSG_DATA_NOT_FOUND");function B(e,t,r){return fetch(e,{credentials:"same-origin"}).then((function(n){if(!n.ok){if(t>1&&n.status>=500)return B(e,t-1,r);if(404===n.status)return n.json().then((function(e){if(e.notFound)return{notFound:W};throw new Error("Failed to load static props")}));throw new Error("Failed to load static props")}return r.text?n.text():n.json()}))}function z(e,t,r,n,o){var a=new URL(e,window.location.href).href;return void 0!==n[a]?n[a]:n[a]=B(e,t?3:1,{text:r}).catch((function(e){throw t||d.markAssetError(e),e})).then((function(e){return o||delete n[a],e})).catch((function(e){throw delete n[a],e}))}var G=function(){function e(t,r,n,o){var a,i=o.initialProps,u=o.pageLoader,c=o.App,s=o.wrapApp,l=o.Component,f=o.err,d=o.subscription,h=o.isFallback,y=o.locale,v=(o.locales,o.defaultLocale,o.domainLocales,o.isPreview),m=this;(function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,e),this.sdc={},this.sdr={},this.sde={},this._idx=0,this.onPopState=function(e){var t=e.state;if(t){if(t.__N){var r=t.url,n=t.as,o=t.options,a=t.idx;m._idx=a;var i=w.parseRelativeUrl(r).pathname;m.isSsr&&n===L(m.asPath)&&i===L(m.pathname)||m._bps&&!m._bps(t)||m.change("replaceState",r,n,Object.assign({},o,{shallow:o.shallow&&m._shallow,locale:o.locale||m.defaultLocale}),undefined)}}else{var u=m.pathname,c=m.query;m.changeState("replaceState",g.formatWithValidation({pathname:L(u),query:c}),g.getURL())}},this.route=p.removePathTrailingSlash(t),this.components={},"/_error"!==t)&&(this.components[this.route]={Component:l,initial:!0,props:i,err:f,__N_SSG:i&&i.__N_SSG,__N_SSP:i&&i.__N_SSP,__N_RSC:!!(null===(a=l)||void 0===a?void 0:a.__next_rsc__)});this.components["/_app"]={Component:c,styleSheets:[]},this.events=e.events,this.pageLoader=u,this.pathname=t,this.query=r;var _=b.isDynamicRoute(t)&&self.__NEXT_DATA__.autoExport;if(this.asPath=_?t:n,this.basePath=j,this.sub=d,this.clc=null,this._wrapApp=s,this.isSsr=!0,this.isFallback=h,this.isReady=!!(self.__NEXT_DATA__.gssp||self.__NEXT_DATA__.gip||self.__NEXT_DATA__.appGip&&!self.__NEXT_DATA__.gsp||!_&&!self.location.search),this.isPreview=!!v,this.isLocaleDomain=!1,"//"!==n.substr(0,2)){var P={locale:y};P._shouldResolveHref=n!==t,this.changeState("replaceState",g.formatWithValidation({pathname:L(t),query:r}),g.getURL(),P)}window.addEventListener("popstate",this.onPopState)}var t,r,n;return t=e,(r=[{key:"reload",value:function(){window.location.reload()}},{key:"back",value:function(){window.history.back()}},{key:"push",value:function(e,t){var r,n=arguments.length>2&&void 
0!==arguments[2]?arguments[2]:{};return e=(r=q(this,e,t)).url,t=r.as,this.change("pushState",e,t,n)}},{key:"replace",value:function(e,t){var r,n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};return e=(r=q(this,e,t)).url,t=r.as,this.change("replaceState",e,t,n)}},{key:"change",value:function(t,r,n,a,i){var c=this;return u(o.default.mark((function u(){var s,y,v,m,_,P,E,O,j,R,F,U,B,z,G,$,V,X,Y,K,Q,J,Z,ee,te,re,ne,oe,ae,ie,ue,ce,se,le,fe,pe,de,he;return o.default.wrap((function(o){for(;;)switch(o.prev=o.next){case 0:if(I(r)){o.next=3;break}return window.location.href=r,o.abrupt("return",!1);case 3:s=a._h||a._shouldResolveHref||C(r)===C(n),a._h&&(c.isReady=!0),y=c.locale,o.next=18;break;case 18:if(a._h||(c.isSsr=!1),g.ST&&performance.mark("routeChange"),v=a.shallow,m={shallow:void 0!==v&&v},c._inFlightRoute&&c.abortComponentLoad(c._inFlightRoute,m),n=L(A(M(n)?T(n):n,a.locale,c.defaultLocale)),_=k(M(n)?T(n):n,c.locale),c._inFlightRoute=n,P=y!==c.locale,a._h||!c.onlyAHashChange(_)||P){o.next=35;break}return c.asPath=_,e.events.emit("hashChangeStart",n,m),c.changeState(t,r,n,a),c.scrollToHash(_),c.notify(c.components[c.route],null),e.events.emit("hashChangeComplete",n,m),o.abrupt("return",!0);case 35:return E=w.parseRelativeUrl(r),O=E.pathname,j=E.query,o.prev=38,o.t0=f,o.next=43,Promise.all([c.pageLoader.getPageList(),d.getClientBuildManifest(),c.pageLoader.getMiddlewareList()]);case 43:o.t1=o.sent,F=(0,o.t0)(o.t1,2),R=F[0],F[1].__rewrites,o.next=54;break;case 50:return o.prev=50,o.t2=o.catch(38),window.location.href=n,o.abrupt("return",!1);case 54:if(c.urlIsNew(_)||P||(t="replaceState"),U=n,O=O?p.removePathTrailingSlash(T(O)):O,s&&"/_error"!==O&&(a._shouldResolveHref=!0,E.pathname=H(O,R),E.pathname!==O&&(O=E.pathname,E.pathname=L(O),r=g.formatWithValidation(E))),I(n)){o.next=63;break}o.next=61;break;case 61:return window.location.href=n,o.abrupt("return",!1);case 63:if(U=k(T(U),c.locale),1===a._h&&!b.isDynamicRoute(p.removePathTrailingSlash(O))){o.next=84;break}return o.next=67,c._preflightRequest({as:n,cache:!0,pages:R,pathname:O,query:j});case 67:if("rewrite"!==(B=o.sent).type){o.next=72;break}j=l({},j,B.parsedAs.query),U=B.asPath,O=B.resolvedHref,E.pathname=B.resolvedHref,r=g.formatWithValidation(E),o.next=84;break;case 72:if("redirect"!==B.type||!B.newAs){o.next=76;break}return o.abrupt("return",c.change(t,B.newUrl,B.newAs,a));case 76:if("redirect"!==B.type||!B.destination){o.next=81;break}return window.location.href=B.destination,o.abrupt("return",new Promise((function(){})));case 81:if("refresh"!==B.type||n===window.location.pathname){o.next=84;break}return window.location.href=n,o.abrupt("return",new Promise((function(){})));case 84:if(z=p.removePathTrailingSlash(O),!b.isDynamicRoute(z)){o.next=100;break}if(G=w.parseRelativeUrl(U),$=G.pathname,V=x.getRouteRegex(z),X=S.getRouteMatcher(V)($),K=(Y=z===$)?N(z,$,j):{},X&&(!Y||K.result)){o.next=99;break}if(!((Q=Object.keys(V.groups).filter((function(e){return!j[e]}))).length>0)){o.next=97;break}throw new Error((Y?"The provided `href` (".concat(r,") value is missing query values (").concat(Q.join(", "),") to be interpolated properly. "):"The provided `as` value (".concat($,") is incompatible with the `href` value (").concat(z,"). 
"))+"Read more: https://nextjs.org/docs/messages/".concat(Y?"href-interpolation-failed":"incompatible-href-as"));case 97:o.next=100;break;case 99:Y?n=g.formatWithValidation(Object.assign({},G,{pathname:K.result,query:D(j,K.params)})):Object.assign(j,X);case 100:return e.events.emit("routeChangeStart",n,m),o.prev=101,o.next=105,c.getRouteInfo(z,O,j,n,U,m);case 105:if(ee=o.sent,te=ee.error,re=ee.props,ne=ee.__N_SSG,oe=ee.__N_SSP,!ne&&!oe||!re){o.next=132;break}if(!re.pageProps||!re.pageProps.__N_REDIRECT){o.next=117;break}if(!(ae=re.pageProps.__N_REDIRECT).startsWith("/")||!1===re.pageProps.__N_REDIRECT_BASE_PATH){o.next=115;break}return(ie=w.parseRelativeUrl(ae)).pathname=H(ie.pathname,R),ue=q(c,ae,ae),ce=ue.url,se=ue.as,o.abrupt("return",c.change(t,ce,se,a));case 115:return window.location.href=ae,o.abrupt("return",new Promise((function(){})));case 117:if(c.isPreview=!!re.__N_PREVIEW,re.notFound!==W){o.next=132;break}return o.prev=120,o.next=123,c.fetchComponent("/404");case 123:le="/404",o.next=129;break;case 126:o.prev=126,o.t3=o.catch(120),le="/_error";case 129:return o.next=131,c.getRouteInfo(le,le,j,n,U,{shallow:!1});case 131:ee=o.sent;case 132:return e.events.emit("beforeHistoryChange",n,m),c.changeState(t,r,n,a),a._h&&"/_error"===O&&500===(null===(J=self.__NEXT_DATA__.props)||void 0===J||null===(Z=J.pageProps)||void 0===Z?void 0:Z.statusCode)&&(null===re||void 0===re?void 0:re.pageProps)&&(re.pageProps.statusCode=500),fe=a.shallow&&c.route===z,de=null!==(pe=a.scroll)&&void 0!==pe?pe:!fe,he=de?{x:0,y:0}:null,o.next=141,c.set(z,O,j,_,ee,null!==i&&void 0!==i?i:he).catch((function(e){if(!e.cancelled)throw e;te=te||e}));case 141:if(!te){o.next=144;break}throw e.events.emit("routeChangeError",te,_,m),te;case 144:return e.events.emit("routeChangeComplete",n,m),o.abrupt("return",!0);case 149:if(o.prev=149,o.t4=o.catch(101),!h.default(o.t4)||!o.t4.cancelled){o.next=153;break}return o.abrupt("return",!1);case 153:throw o.t4;case 154:case"end":return o.stop()}}),u,null,[[38,50],[101,149],[120,126]])})))()}},{key:"changeState",value:function(e,t,r){var n=arguments.length>3&&void 0!==arguments[3]?arguments[3]:{};"pushState"===e&&g.getURL()===r||(this._shallow=n.shallow,window.history[e]({url:t,as:r,options:n,__N:!0,idx:this._idx="pushState"!==e?this._idx:this._idx+1},"",r))}},{key:"handleRouteInfoError",value:function(t,r,n,a,i,c){var s=this;return u(o.default.mark((function u(){var l,f,p,y;return o.default.wrap((function(o){for(;;)switch(o.prev=o.next){case 0:if(!t.cancelled){o.next=2;break}throw t;case 2:if(!d.isAssetError(t)&&!c){o.next=6;break}throw e.events.emit("routeChangeError",t,a,i),window.location.href=a,R();case 6:if(o.prev=6,"undefined"!==typeof l&&"undefined"!==typeof f){o.next=18;break}return o.next=14,s.fetchComponent("/_error");case 14:p=o.sent,l=p.page,f=p.styleSheets;case 18:if((y={props:void 0,Component:l,styleSheets:f,err:t,error:t}).props){o.next=30;break}return o.prev=20,o.next=23,s.getInitialProps(l,{err:t,pathname:r,query:n});case 23:y.props=o.sent,o.next=30;break;case 26:o.prev=26,o.t0=o.catch(20),console.error("Error in error page `getInitialProps`: ",o.t0),y.props={};case 30:return o.abrupt("return",y);case 33:return o.prev=33,o.t1=o.catch(6),o.abrupt("return",s.handleRouteInfoError(h.default(o.t1)?o.t1:new Error(o.t1+""),r,n,a,i,!0));case 36:case"end":return o.stop()}}),u,null,[[6,33],[20,26]])})))()}},{key:"getRouteInfo",value:function(e,t,r,n,a,i){var c=this;return u(o.default.mark((function u(){var s,l,f,p,d,y,v,m,b,w,_,P;return 
o.default.wrap((function(o){for(;;)switch(o.prev=o.next){case 0:if(o.prev=0,s=c.components[e],!i.shallow||!s||c.route!==e){o.next=4;break}return o.abrupt("return",s);case 4:if(l=void 0,s&&!("initial"in s)&&(l=s),o.t0=l,o.t0){o.next=11;break}return o.next=10,c.fetchComponent(e).then((function(e){return{Component:e.page,styleSheets:e.styleSheets,__N_SSG:e.mod.__N_SSG,__N_SSP:e.mod.__N_SSP,__N_RSC:!!e.page.__next_rsc__}}));case 10:o.t0=o.sent;case 11:f=o.t0,p=f.Component,d=f.__N_SSG,y=f.__N_SSP,v=f.__N_RSC,o.next=17;break;case 17:return(d||y||v)&&(m=c.pageLoader.getDataHref({href:g.formatWithValidation({pathname:t,query:r}),asPath:a,ssg:d,rsc:v,locale:c.locale})),o.next=21,c._getData((function(){return d||y?z(m,c.isSsr,!1,d?c.sdc:c.sdr,!!d&&!c.isPreview):c.getInitialProps(p,{pathname:t,query:r,asPath:n,locale:c.locale,locales:c.locales,defaultLocale:c.defaultLocale})}));case 21:if(b=o.sent,!v){o.next=29;break}return o.next=25,c._getData((function(){return c._getFlightData(m)}));case 25:w=o.sent,_=w.fresh,P=w.data,b.pageProps=Object.assign(b.pageProps,{__flight_serialized__:P,__flight_fresh__:_});case 29:return f.props=b,c.components[e]=f,o.abrupt("return",f);case 34:return o.prev=34,o.t1=o.catch(0),o.abrupt("return",c.handleRouteInfoError(h.getProperError(o.t1),t,r,n,i));case 37:case"end":return o.stop()}}),u,null,[[0,34]])})))()}},{key:"set",value:function(e,t,r,n,o,a){return this.isFallback=!1,this.route=e,this.pathname=t,this.query=r,this.asPath=n,this.notify(o,a)}},{key:"beforePopState",value:function(e){this._bps=e}},{key:"onlyAHashChange",value:function(e){if(!this.asPath)return!1;var t=f(this.asPath.split("#"),2),r=t[0],n=t[1],o=f(e.split("#"),2),a=o[0],i=o[1];return!(!i||r!==a||n!==i)||r===a&&n!==i}},{key:"scrollToHash",value:function(e){var t=f(e.split("#"),2)[1],r=void 0===t?"":t;if(""!==r&&"top"!==r){var n=document.getElementById(r);if(n)n.scrollIntoView();else{var o=document.getElementsByName(r)[0];o&&o.scrollIntoView()}}else window.scrollTo(0,0)}},{key:"urlIsNew",value:function(e){return this.asPath!==e}},{key:"prefetch",value:function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:e,r=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},n=this;return u(o.default.mark((function a(){var i,u,c,s,f,d,h,y,v,m;return o.default.wrap((function(o){for(;;)switch(o.prev=o.next){case 0:return i=w.parseRelativeUrl(e),u=i.pathname,c=i.query,o.next=5,n.pageLoader.getPageList();case 5:s=o.sent,f=t,o.next=20;break;case 12:h=o.sent,d=h.__rewrites,y=P.default(L(A(t,n.locale)),s,d,i.query,(function(e){return H(e,s)}),n.locales),f=k(T(y.asPath),n.locale),y.matchedPage&&y.resolvedHref&&(u=y.resolvedHref,i.pathname=u,e=g.formatWithValidation(i)),o.next=21;break;case 20:i.pathname=H(i.pathname,s),i.pathname!==u&&(u=i.pathname,i.pathname=u,e=g.formatWithValidation(i));case 21:o.next=23;break;case 23:return o.next=25,n._preflightRequest({as:L(t),cache:!0,pages:s,pathname:u,query:c});case 25:return"rewrite"===(v=o.sent).type&&(i.pathname=v.resolvedHref,u=v.resolvedHref,c=l({},c,v.parsedAs.query),f=v.asPath,e=g.formatWithValidation(i)),m=p.removePathTrailingSlash(u),o.next=30,Promise.all([n.pageLoader._isSsg(m).then((function(t){return!!t&&z(n.pageLoader.getDataHref({href:e,asPath:f,ssg:!0,locale:"undefined"!==typeof r.locale?r.locale:n.locale}),!1,!1,n.sdc,!0)})),n.pageLoader[r.priority?"loadPage":"prefetch"](m)]);case 30:case"end":return o.stop()}}),a)})))()}},{key:"fetchComponent",value:function(e){var t=this;return u(o.default.mark((function r(){var n,a,i,u;return 
o.default.wrap((function(r){for(;;)switch(r.prev=r.next){case 0:return n=!1,a=t.clc=function(){n=!0},i=function(){if(n){var r=new Error('Abort fetching component for route: "'.concat(e,'"'));throw r.cancelled=!0,r}a===t.clc&&(t.clc=null)},r.prev=3,r.next=6,t.pageLoader.loadPage(e);case 6:return u=r.sent,i(),r.abrupt("return",u);case 11:throw r.prev=11,r.t0=r.catch(3),i(),r.t0;case 15:case"end":return r.stop()}}),r,null,[[3,11]])})))()}},{key:"_getData",value:function(e){var t=this,r=!1,n=function(){r=!0};return this.clc=n,e().then((function(e){if(n===t.clc&&(t.clc=null),r){var o=new Error("Loading initial props cancelled");throw o.cancelled=!0,o}return e}))}},{key:"_getFlightData",value:function(e){return z(e,!0,!0,this.sdc,!1).then((function(e){return{fresh:!0,data:e}}))}},{key:"_preflightRequest",value:function(e){var t=this;return u(o.default.mark((function r(){var n,a,i,u,c,s,l,d,h,y,m;return o.default.wrap((function(r){for(;;)switch(r.prev=r.next){case 0:return a=k(M(e.as)?T(e.as):e.as,t.locale),r.next=4,t.pageLoader.getMiddlewareList();case 4:if(r.sent.some((function(e){var t=f(e,2),r=t[0],n=t[1];return S.getRouteMatcher(E.getMiddlewareRegex(r,!n))(a)}))){r.next=8;break}return r.abrupt("return",{type:"next"});case 8:return r.next=10,t._getPreflightData({preflightHref:e.as,shouldCache:e.cache});case 10:if(i=r.sent,!(null===(n=i.rewrite)||void 0===n?void 0:n.startsWith("/"))){r.next=18;break}return u=w.parseRelativeUrl(v.normalizeLocalePath(M(i.rewrite)?T(i.rewrite):i.rewrite,t.locales).pathname),c=p.removePathTrailingSlash(u.pathname),e.pages.includes(c)?(s=!0,l=c):(l=H(c,e.pages))!==u.pathname&&e.pages.includes(l)&&(s=!0),r.abrupt("return",{type:"rewrite",asPath:u.pathname,parsedAs:u,matchedPage:s,resolvedHref:l});case 18:if(!i.redirect){r.next=24;break}if(!i.redirect.startsWith("/")){r.next=23;break}return d=p.removePathTrailingSlash(v.normalizeLocalePath(M(i.redirect)?T(i.redirect):i.redirect,t.locales).pathname),h=q(t,d,d),y=h.url,m=h.as,r.abrupt("return",{type:"redirect",newUrl:y,newAs:m});case 23:return r.abrupt("return",{type:"redirect",destination:i.redirect});case 24:if(!i.refresh||i.ssr){r.next=26;break}return r.abrupt("return",{type:"refresh"});case 26:return r.abrupt("return",{type:"next"});case 27:case"end":return r.stop()}}),r)})))()}},{key:"_getPreflightData",value:function(e){var t=this,r=e.preflightHref,n=e.shouldCache,o=void 0!==n&&n,a=new URL(r,window.location.href).href;return!this.isPreview&&o&&this.sde[a]?Promise.resolve(this.sde[a]):fetch(r,{method:"HEAD",credentials:"same-origin",headers:{"x-middleware-preflight":"1"}}).then((function(e){if(!e.ok)throw new Error("Failed to preflight request");return{cache:e.headers.get("x-middleware-cache"),redirect:e.headers.get("Location"),refresh:e.headers.has("x-middleware-refresh"),rewrite:e.headers.get("x-middleware-rewrite"),ssr:!!e.headers.get("x-middleware-ssr")}})).then((function(e){return o&&"no-cache"!==e.cache&&(t.sde[a]=e),e})).catch((function(e){throw delete t.sde[a],e}))}},{key:"getInitialProps",value:function(e,t){var r=this.components["/_app"].Component,n=this._wrapApp(r);return t.AppTree=n,g.loadGetInitialProps(r,{AppTree:n,Component:e,router:this,ctx:t})}},{key:"abortComponentLoad",value:function(t,r){this.clc&&(e.events.emit("routeChangeError",R(),t,r),this.clc(),this.clc=null)}},{key:"notify",value:function(e,t){return this.sub(e,this.components["/_app"].Component,t)}}])&&c(t.prototype,r),n&&c(t,n),e}();G.events=m.default(),t.default=G},4611:function(e,t,r){"use 
strict";Object.defineProperty(t,"__esModule",{value:!0}),t.formatUrl=function(e){var t=e.auth,r=e.hostname,a=e.protocol||"",i=e.pathname||"",u=e.hash||"",c=e.query||"",s=!1;t=t?encodeURIComponent(t).replace(/%3A/i,":")+"@":"",e.host?s=t+e.host:r&&(s=t+(~r.indexOf(":")?"[".concat(r,"]"):r),e.port&&(s+=":"+e.port));c&&"object"===typeof c&&(c=String(n.urlQueryToSearchParams(c)));var l=e.search||c&&"?".concat(c)||"";a&&":"!==a.substr(-1)&&(a+=":");e.slashes||(!a||o.test(a))&&!1!==s?(s="//"+(s||""),i&&"/"!==i[0]&&(i="/"+i)):s||(s="");u&&"#"!==u[0]&&(u="#"+u);l&&"?"!==l[0]&&(l="?"+l);return i=i.replace(/[?#]/g,encodeURIComponent),l=l.replace("#","%23"),"".concat(a).concat(s).concat(i).concat(l).concat(u)};var n=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var r in e)if(Object.prototype.hasOwnProperty.call(e,r)){var n=Object.defineProperty&&Object.getOwnPropertyDescriptor?Object.getOwnPropertyDescriptor(e,r):{};n.get||n.set?Object.defineProperty(t,r,n):t[r]=e[r]}return t.default=e,t}(r(466));var o=/https?|ftp|gopher|file/},3891:function(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"",r="/"===e?"/index":/^\/index(\/|$)/.test(e)?"/index".concat(e):"".concat(e);return r+t}},9820:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.getMiddlewareRegex=function(e){var t=!(arguments.length>1&&void 0!==arguments[1])||arguments[1],r=n.getParametrizedRoute(e),o=t?"(?!_next).*":"",a=t?"(?:(/.*)?)":"";if("routeKeys"in r)return"/"===r.parameterizedRoute?{groups:{},namedRegex:"^/".concat(o,"$"),re:new RegExp("^/".concat(o,"$")),routeKeys:{}}:{groups:r.groups,namedRegex:"^".concat(r.namedParameterizedRoute).concat(a,"$"),re:new RegExp("^".concat(r.parameterizedRoute).concat(a,"$")),routeKeys:r.routeKeys};if("/"===r.parameterizedRoute)return{groups:{},re:new RegExp("^/".concat(o,"$"))};return{groups:{},re:new RegExp("^".concat(r.parameterizedRoute).concat(a,"$"))}};var n=r(4095)},418:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"getMiddlewareRegex",{enumerable:!0,get:function(){return n.getMiddlewareRegex}}),Object.defineProperty(t,"getRouteMatcher",{enumerable:!0,get:function(){return o.getRouteMatcher}}),Object.defineProperty(t,"getRouteRegex",{enumerable:!0,get:function(){return a.getRouteRegex}}),Object.defineProperty(t,"getSortedRoutes",{enumerable:!0,get:function(){return i.getSortedRoutes}}),Object.defineProperty(t,"isDynamicRoute",{enumerable:!0,get:function(){return u.isDynamicRoute}});var n=r(9820),o=r(3888),a=r(4095),i=r(3907),u=r(8689)},8689:function(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.isDynamicRoute=function(e){return r.test(e)};var r=/\/\[[^/]+?\](?=\/|$)/},6305:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.parseRelativeUrl=function(e,t){var r=new URL(n.getLocationOrigin()),a=t?new URL(t,r):r,i=new URL(e,a),u=i.pathname,c=i.searchParams,s=i.search,l=i.hash,f=i.href;if(i.origin!==r.origin)throw new Error("invariant: invalid relative URL, router received ".concat(e));return{pathname:u,query:o.searchParamsToUrlQuery(c),search:s,hash:l,href:f.slice(r.origin.length)}};var n=r(3794),o=r(466)},466:function(e,t){"use strict";function r(e,t){(null==t||t>e.length)&&(t=e.length);for(var r=0,n=new Array(t);r1?t-1:0),n=1;ne.length)&&(t=e.length);for(var r=0,n=new Array(t);r0&&void 
0!==arguments[0]?arguments[0]:"/",t=this,r=o(this.children.keys()).sort();null!==this.slugName&&r.splice(r.indexOf("[]"),1),null!==this.restSlugName&&r.splice(r.indexOf("[...]"),1),null!==this.optionalRestSlugName&&r.splice(r.indexOf("[[...]]"),1);var n,a,i,u=r.map((function(r){return t.children.get(r)._smoosh("".concat(e).concat(r,"/"))})).reduce((function(e,t){return o(e).concat(o(t))}),[]);if(null!==this.slugName&&(n=u).push.apply(n,o(this.children.get("[]")._smoosh("".concat(e,"[").concat(this.slugName,"]/")))),!this.placeholder){var c="/"===e?"/":e.slice(0,-1);if(null!=this.optionalRestSlugName)throw new Error('You cannot define a route with the same specificity as a optional catch-all route ("'.concat(c,'" and "').concat(c,"[[...").concat(this.optionalRestSlugName,']]").'));u.unshift(c)}return null!==this.restSlugName&&(a=u).push.apply(a,o(this.children.get("[...]")._smoosh("".concat(e,"[...").concat(this.restSlugName,"]/")))),null!==this.optionalRestSlugName&&(i=u).push.apply(i,o(this.children.get("[[...]]")._smoosh("".concat(e,"[[...").concat(this.optionalRestSlugName,"]]/")))),u}},{key:"_insert",value:function(t,r,n){if(0!==t.length){if(n)throw new Error("Catch-all must be the last part of the URL.");var o=t[0];if(o.startsWith("[")&&o.endsWith("]")){var a=function(e,t){if(null!==e&&e!==t)throw new Error("You cannot use different slug names for the same dynamic path ('".concat(e,"' !== '").concat(t,"')."));r.forEach((function(e){if(e===t)throw new Error('You cannot have the same slug name "'.concat(t,'" repeat within a single dynamic path'));if(e.replace(/\W/g,"")===o.replace(/\W/g,""))throw new Error('You cannot have the slug names "'.concat(e,'" and "').concat(t,'" differ only by non-word symbols within a single dynamic path'))})),r.push(t)},i=o.slice(1,-1),u=!1;if(i.startsWith("[")&&i.endsWith("]")&&(i=i.slice(1,-1),u=!0),i.startsWith("...")&&(i=i.substring(3),n=!0),i.startsWith("[")||i.endsWith("]"))throw new Error("Segment names may not start or end with extra brackets ('".concat(i,"')."));if(i.startsWith("."))throw new Error("Segment names may not start with erroneous periods ('".concat(i,"')."));if(n)if(u){if(null!=this.restSlugName)throw new Error('You cannot use both an required and optional catch-all route at the same level ("[...'.concat(this.restSlugName,']" and "').concat(t[0],'" ).'));a(this.optionalRestSlugName,i),this.optionalRestSlugName=i,o="[[...]]"}else{if(null!=this.optionalRestSlugName)throw new Error('You cannot use both an optional and required catch-all route at the same level ("[[...'.concat(this.optionalRestSlugName,']]" and "').concat(t[0],'").'));a(this.restSlugName,i),this.restSlugName=i,o="[...]"}else{if(u)throw new Error('Optional route parameters are not yet supported ("'.concat(t[0],'").'));a(this.slugName,i),this.slugName=i,o="[]"}}this.children.has(o)||this.children.set(o,new e),this.children.get(o)._insert(t.slice(1),r,n)}else this.placeholder=!1}}])&&n(t.prototype,r),a&&n(t,a),e}()},8027:function(e,t){"use strict";var r;Object.defineProperty(t,"__esModule",{value:!0}),t.setConfig=function(e){r=e},t.default=void 0;t.default=function(){return r}},5188:function(e,t,r){"use strict";function n(e,t){(null==t||t>e.length)&&(t=e.length);for(var r=0,n=new Array(t);re.length)&&(t=e.length);for(var r=0,n=new Array(t);r=0;--a){var i=this.tryEntries[a],u=i.completion;if("root"===i.tryLoc)return o("end");if(i.tryLoc<=this.prev){var c=n.call(i,"catchLoc"),s=n.call(i,"finallyLoc");if(c&&s){if(this.prev=0;--r){var 
o=this.tryEntries[r];if(o.tryLoc<=this.prev&&n.call(o,"finallyLoc")&&this.prev=0;--t){var r=this.tryEntries[t];if(r.finallyLoc===e)return this.complete(r.completion,r.afterLoc),O(r),h}},catch:function(e){for(var t=this.tryEntries.length-1;t>=0;--t){var r=this.tryEntries[t];if(r.tryLoc===e){var n=r.completion;if("throw"===n.type){var o=n.arg;O(r)}return o}}throw new Error("illegal catch attempt")},delegateYield:function(e,r,n){return this.delegate={iterator:R(e),resultName:r,nextLoc:n},"next"===this.method&&(this.arg=t),h}},e}(e.exports);try{regeneratorRuntime=t}catch(r){Function("r","regeneratorRuntime = r")(t)}},8745:function(e){!function(){var t={106:function(e,t){!function(e){"use strict";var t,r,n,o,a=function(e,t){return{name:e,value:void 0===t?-1:t,delta:0,entries:[],id:"v2-".concat(Date.now(),"-").concat(Math.floor(8999999999999*Math.random())+1e12)}},i=function(e,t){try{if(PerformanceObserver.supportedEntryTypes.includes(e)){if("first-input"===e&&!("PerformanceEventTiming"in self))return;var r=new PerformanceObserver((function(e){return e.getEntries().map(t)}));return r.observe({type:e,buffered:!0}),r}}catch(e){}},u=function(e,t){var r=function r(n){"pagehide"!==n.type&&"hidden"!==document.visibilityState||(e(n),t&&(removeEventListener("visibilitychange",r,!0),removeEventListener("pagehide",r,!0)))};addEventListener("visibilitychange",r,!0),addEventListener("pagehide",r,!0)},c=function(e){addEventListener("pageshow",(function(t){t.persisted&&e(t)}),!0)},s=function(e,t,r){var n;return function(o){t.value>=0&&(o||r)&&(t.delta=t.value-(n||0),(t.delta||void 0===n)&&(n=t.value,e(t)))}},l=-1,f=function(){return"hidden"===document.visibilityState?0:1/0},p=function(){u((function(e){var t=e.timeStamp;l=t}),!0)},d=function(){return l<0&&(l=f(),p(),c((function(){setTimeout((function(){l=f(),p()}),0)}))),{get firstHiddenTime(){return l}}},h=function(e,t){var r,n=d(),o=a("FCP"),u=function(e){"first-contentful-paint"===e.name&&(f&&f.disconnect(),e.startTime=0&&r1e12?new Date:performance.now())-e.timeStamp;"pointerdown"==e.type?function(e,t){var r=function(){b(e,t),o()},n=function(){o()},o=function(){removeEventListener("pointerup",r,m),removeEventListener("pointercancel",n,m)};addEventListener("pointerup",r,m),addEventListener("pointercancel",n,m)}(t,e):b(t,e)}},P=function(e){["mousedown","keydown","touchstart","pointerdown"].forEach((function(t){return e(t,_,m)}))},S=new Set;e.getCLS=function(e,t){y||(h((function(e){v=e.value})),y=!0);var r,n=function(t){v>-1&&e(t)},o=a("CLS",0),l=0,f=[],p=function(e){if(!e.hadRecentInput){var t=f[0],n=f[f.length-1];l&&e.startTime-n.startTime<1e3&&e.startTime-t.startTime<5e3?(l+=e.value,f.push(e)):(l=e.value,f=[e]),l>o.value&&(o.value=l,o.entries=f,r())}},d=i("layout-shift",p);d&&(r=s(n,o,t),u((function(){d.takeRecords().map(p),r(!0)})),c((function(){l=0,v=-1,o=a("CLS",0),r=s(n,o,t)})))},e.getFCP=h,e.getFID=function(e,n){var l,f=d(),p=a("FID"),h=function(e){e.startTime { - throw Error("readFile not implemented"); - }, - writeFile: () => { - throw Error("writeFile not implemented"); - } -}; - -function setFileSystem(fs) { - fileSystem.readFile = fs.readFile; - fileSystem.writeFile = fs.writeFile; -} - -function getFileSystem() { - return fileSystem; -} - -var pluginFactory = {}; - -var unquote$1 = {}; - -Object.defineProperty(unquote$1, "__esModule", { - value: true -}); -unquote$1.default = unquote; -// copied from https://github.com/lakenen/node-unquote -const reg = /['"]/; - -function unquote(str) { - if (!str) { - return ""; - } - - if 
(reg.test(str.charAt(0))) { - str = str.substr(1); - } - - if (reg.test(str.charAt(str.length - 1))) { - str = str.substr(0, str.length - 1); - } - - return str; -} - -var Parser$1 = {}; - -const matchValueName = /[$]?[\w-]+/g; - -const replaceValueSymbols$2 = (value, replacements) => { - let matches; - - while ((matches = matchValueName.exec(value))) { - const replacement = replacements[matches[0]]; - - if (replacement) { - value = - value.slice(0, matches.index) + - replacement + - value.slice(matchValueName.lastIndex); - - matchValueName.lastIndex -= matches[0].length - replacement.length; - } - } - - return value; -}; - -var replaceValueSymbols_1 = replaceValueSymbols$2; - -const replaceValueSymbols$1 = replaceValueSymbols_1; - -const replaceSymbols$1 = (css, replacements) => { - css.walk((node) => { - if (node.type === "decl" && node.value) { - node.value = replaceValueSymbols$1(node.value.toString(), replacements); - } else if (node.type === "rule" && node.selector) { - node.selector = replaceValueSymbols$1( - node.selector.toString(), - replacements - ); - } else if (node.type === "atrule" && node.params) { - node.params = replaceValueSymbols$1(node.params.toString(), replacements); - } - }); -}; - -var replaceSymbols_1 = replaceSymbols$1; - -const importPattern = /^:import\(("[^"]*"|'[^']*'|[^"']+)\)$/; -const balancedQuotes = /^("[^"]*"|'[^']*'|[^"']+)$/; - -const getDeclsObject = (rule) => { - const object = {}; - - rule.walkDecls((decl) => { - const before = decl.raws.before ? decl.raws.before.trim() : ""; - - object[before + decl.prop] = decl.value; - }); - - return object; -}; -/** - * - * @param {string} css - * @param {boolean} removeRules - * @param {'auto' | 'rule' | 'at-rule'} mode - */ -const extractICSS$2 = (css, removeRules = true, mode = "auto") => { - const icssImports = {}; - const icssExports = {}; - - function addImports(node, path) { - const unquoted = path.replace(/'|"/g, ""); - icssImports[unquoted] = Object.assign( - icssImports[unquoted] || {}, - getDeclsObject(node) - ); - - if (removeRules) { - node.remove(); - } - } - - function addExports(node) { - Object.assign(icssExports, getDeclsObject(node)); - if (removeRules) { - node.remove(); - } - } - - css.each((node) => { - if (node.type === "rule" && mode !== "at-rule") { - if (node.selector.slice(0, 7) === ":import") { - const matches = importPattern.exec(node.selector); - - if (matches) { - addImports(node, matches[1]); - } - } - - if (node.selector === ":export") { - addExports(node); - } - } - - if (node.type === "atrule" && mode !== "rule") { - if (node.name === "icss-import") { - const matches = balancedQuotes.exec(node.params); - - if (matches) { - addImports(node, matches[1]); - } - } - if (node.name === "icss-export") { - addExports(node); - } - } - }); - - return { icssImports, icssExports }; -}; - -var extractICSS_1 = extractICSS$2; - -const createImports = (imports, postcss, mode = "rule") => { - return Object.keys(imports).map((path) => { - const aliases = imports[path]; - const declarations = Object.keys(aliases).map((key) => - postcss.decl({ - prop: key, - value: aliases[key], - raws: { before: "\n " }, - }) - ); - - const hasDeclarations = declarations.length > 0; - - const rule = - mode === "rule" - ? postcss.rule({ - selector: `:import('${path}')`, - raws: { after: hasDeclarations ? "\n" : "" }, - }) - : postcss.atRule({ - name: "icss-import", - params: `'${path}'`, - raws: { after: hasDeclarations ? 
"\n" : "" }, - }); - - if (hasDeclarations) { - rule.append(declarations); - } - - return rule; - }); -}; - -const createExports = (exports, postcss, mode = "rule") => { - const declarations = Object.keys(exports).map((key) => - postcss.decl({ - prop: key, - value: exports[key], - raws: { before: "\n " }, - }) - ); - - if (declarations.length === 0) { - return []; - } - const rule = - mode === "rule" - ? postcss.rule({ - selector: `:export`, - raws: { after: "\n" }, - }) - : postcss.atRule({ - name: "icss-export", - raws: { after: "\n" }, - }); - - rule.append(declarations); - - return [rule]; -}; - -const createICSSRules$1 = (imports, exports, postcss, mode) => [ - ...createImports(imports, postcss, mode), - ...createExports(exports, postcss, mode), -]; - -var createICSSRules_1 = createICSSRules$1; - -const replaceValueSymbols = replaceValueSymbols_1; -const replaceSymbols = replaceSymbols_1; -const extractICSS$1 = extractICSS_1; -const createICSSRules = createICSSRules_1; - -var src$4 = { - replaceValueSymbols, - replaceSymbols, - extractICSS: extractICSS$1, - createICSSRules, -}; - -Object.defineProperty(Parser$1, "__esModule", { - value: true -}); -Parser$1.default = void 0; - -var _icssUtils = src$4; - -// Initially copied from https://github.com/css-modules/css-modules-loader-core -const importRegexp = /^:import\((.+)\)$/; - -class Parser { - constructor(pathFetcher, trace) { - this.pathFetcher = pathFetcher; - this.plugin = this.plugin.bind(this); - this.exportTokens = {}; - this.translations = {}; - this.trace = trace; - } - - plugin() { - const parser = this; - return { - postcssPlugin: "css-modules-parser", - - async OnceExit(css) { - await Promise.all(parser.fetchAllImports(css)); - parser.linkImportedSymbols(css); - return parser.extractExports(css); - } - - }; - } - - fetchAllImports(css) { - let imports = []; - css.each(node => { - if (node.type == "rule" && node.selector.match(importRegexp)) { - imports.push(this.fetchImport(node, css.source.input.from, imports.length)); - } - }); - return imports; - } - - linkImportedSymbols(css) { - (0, _icssUtils.replaceSymbols)(css, this.translations); - } - - extractExports(css) { - css.each(node => { - if (node.type == "rule" && node.selector == ":export") this.handleExport(node); - }); - } - - handleExport(exportNode) { - exportNode.each(decl => { - if (decl.type == "decl") { - Object.keys(this.translations).forEach(translation => { - decl.value = decl.value.replace(translation, this.translations[translation]); - }); - this.exportTokens[decl.prop] = decl.value; - } - }); - exportNode.remove(); - } - - async fetchImport(importNode, relativeTo, depNr) { - const file = importNode.selector.match(importRegexp)[1]; - const depTrace = this.trace + String.fromCharCode(depNr); - const exports = await this.pathFetcher(file, relativeTo, depTrace); - - try { - importNode.each(decl => { - if (decl.type == "decl") { - this.translations[decl.prop] = exports[decl.value]; - } - }); - importNode.remove(); - } catch (err) { - console.log(err); - } - } - -} - -Parser$1.default = Parser; - -var saveJSON$1 = {}; - -Object.defineProperty(saveJSON$1, "__esModule", { - value: true -}); -saveJSON$1.default = saveJSON; - -var _fs$2 = fs; - -function saveJSON(cssFile, json) { - return new Promise((resolve, reject) => { - const { - writeFile - } = (0, _fs$2.getFileSystem)(); - writeFile(`${cssFile}.json`, JSON.stringify(json), e => e ? 
reject(e) : resolve(json)); - }); -} - -var localsConvention = {}; - -/** - * lodash (Custom Build) - * Build: `lodash modularize exports="npm" -o ./` - * Copyright jQuery Foundation and other contributors - * Released under MIT license - * Based on Underscore.js 1.8.3 - * Copyright Jeremy Ashkenas, DocumentCloud and Investigative Reporters & Editors - */ - -/** Used as references for various `Number` constants. */ -var INFINITY = 1 / 0; - -/** `Object#toString` result references. */ -var symbolTag = '[object Symbol]'; - -/** Used to match words composed of alphanumeric characters. */ -var reAsciiWord = /[^\x00-\x2f\x3a-\x40\x5b-\x60\x7b-\x7f]+/g; - -/** Used to match Latin Unicode letters (excluding mathematical operators). */ -var reLatin = /[\xc0-\xd6\xd8-\xf6\xf8-\xff\u0100-\u017f]/g; - -/** Used to compose unicode character classes. */ -var rsAstralRange = '\\ud800-\\udfff', - rsComboMarksRange = '\\u0300-\\u036f\\ufe20-\\ufe23', - rsComboSymbolsRange = '\\u20d0-\\u20f0', - rsDingbatRange = '\\u2700-\\u27bf', - rsLowerRange = 'a-z\\xdf-\\xf6\\xf8-\\xff', - rsMathOpRange = '\\xac\\xb1\\xd7\\xf7', - rsNonCharRange = '\\x00-\\x2f\\x3a-\\x40\\x5b-\\x60\\x7b-\\xbf', - rsPunctuationRange = '\\u2000-\\u206f', - rsSpaceRange = ' \\t\\x0b\\f\\xa0\\ufeff\\n\\r\\u2028\\u2029\\u1680\\u180e\\u2000\\u2001\\u2002\\u2003\\u2004\\u2005\\u2006\\u2007\\u2008\\u2009\\u200a\\u202f\\u205f\\u3000', - rsUpperRange = 'A-Z\\xc0-\\xd6\\xd8-\\xde', - rsVarRange = '\\ufe0e\\ufe0f', - rsBreakRange = rsMathOpRange + rsNonCharRange + rsPunctuationRange + rsSpaceRange; - -/** Used to compose unicode capture groups. */ -var rsApos = "['\u2019]", - rsAstral = '[' + rsAstralRange + ']', - rsBreak = '[' + rsBreakRange + ']', - rsCombo = '[' + rsComboMarksRange + rsComboSymbolsRange + ']', - rsDigits = '\\d+', - rsDingbat = '[' + rsDingbatRange + ']', - rsLower = '[' + rsLowerRange + ']', - rsMisc = '[^' + rsAstralRange + rsBreakRange + rsDigits + rsDingbatRange + rsLowerRange + rsUpperRange + ']', - rsFitz = '\\ud83c[\\udffb-\\udfff]', - rsModifier = '(?:' + rsCombo + '|' + rsFitz + ')', - rsNonAstral = '[^' + rsAstralRange + ']', - rsRegional = '(?:\\ud83c[\\udde6-\\uddff]){2}', - rsSurrPair = '[\\ud800-\\udbff][\\udc00-\\udfff]', - rsUpper = '[' + rsUpperRange + ']', - rsZWJ = '\\u200d'; - -/** Used to compose unicode regexes. */ -var rsLowerMisc = '(?:' + rsLower + '|' + rsMisc + ')', - rsUpperMisc = '(?:' + rsUpper + '|' + rsMisc + ')', - rsOptLowerContr = '(?:' + rsApos + '(?:d|ll|m|re|s|t|ve))?', - rsOptUpperContr = '(?:' + rsApos + '(?:D|LL|M|RE|S|T|VE))?', - reOptMod = rsModifier + '?', - rsOptVar = '[' + rsVarRange + ']?', - rsOptJoin = '(?:' + rsZWJ + '(?:' + [rsNonAstral, rsRegional, rsSurrPair].join('|') + ')' + rsOptVar + reOptMod + ')*', - rsSeq = rsOptVar + reOptMod + rsOptJoin, - rsEmoji = '(?:' + [rsDingbat, rsRegional, rsSurrPair].join('|') + ')' + rsSeq, - rsSymbol = '(?:' + [rsNonAstral + rsCombo + '?', rsCombo, rsRegional, rsSurrPair, rsAstral].join('|') + ')'; - -/** Used to match apostrophes. */ -var reApos = RegExp(rsApos, 'g'); - -/** - * Used to match [combining diacritical marks](https://en.wikipedia.org/wiki/Combining_Diacritical_Marks) and - * [combining diacritical marks for symbols](https://en.wikipedia.org/wiki/Combining_Diacritical_Marks_for_Symbols). - */ -var reComboMark = RegExp(rsCombo, 'g'); - -/** Used to match [string symbols](https://mathiasbynens.be/notes/javascript-unicode). 
*/ -var reUnicode = RegExp(rsFitz + '(?=' + rsFitz + ')|' + rsSymbol + rsSeq, 'g'); - -/** Used to match complex or compound words. */ -var reUnicodeWord = RegExp([ - rsUpper + '?' + rsLower + '+' + rsOptLowerContr + '(?=' + [rsBreak, rsUpper, '$'].join('|') + ')', - rsUpperMisc + '+' + rsOptUpperContr + '(?=' + [rsBreak, rsUpper + rsLowerMisc, '$'].join('|') + ')', - rsUpper + '?' + rsLowerMisc + '+' + rsOptLowerContr, - rsUpper + '+' + rsOptUpperContr, - rsDigits, - rsEmoji -].join('|'), 'g'); - -/** Used to detect strings with [zero-width joiners or code points from the astral planes](http://eev.ee/blog/2015/09/12/dark-corners-of-unicode/). */ -var reHasUnicode = RegExp('[' + rsZWJ + rsAstralRange + rsComboMarksRange + rsComboSymbolsRange + rsVarRange + ']'); - -/** Used to detect strings that need a more robust regexp to match words. */ -var reHasUnicodeWord = /[a-z][A-Z]|[A-Z]{2,}[a-z]|[0-9][a-zA-Z]|[a-zA-Z][0-9]|[^a-zA-Z0-9 ]/; - -/** Used to map Latin Unicode letters to basic Latin letters. */ -var deburredLetters = { - // Latin-1 Supplement block. - '\xc0': 'A', '\xc1': 'A', '\xc2': 'A', '\xc3': 'A', '\xc4': 'A', '\xc5': 'A', - '\xe0': 'a', '\xe1': 'a', '\xe2': 'a', '\xe3': 'a', '\xe4': 'a', '\xe5': 'a', - '\xc7': 'C', '\xe7': 'c', - '\xd0': 'D', '\xf0': 'd', - '\xc8': 'E', '\xc9': 'E', '\xca': 'E', '\xcb': 'E', - '\xe8': 'e', '\xe9': 'e', '\xea': 'e', '\xeb': 'e', - '\xcc': 'I', '\xcd': 'I', '\xce': 'I', '\xcf': 'I', - '\xec': 'i', '\xed': 'i', '\xee': 'i', '\xef': 'i', - '\xd1': 'N', '\xf1': 'n', - '\xd2': 'O', '\xd3': 'O', '\xd4': 'O', '\xd5': 'O', '\xd6': 'O', '\xd8': 'O', - '\xf2': 'o', '\xf3': 'o', '\xf4': 'o', '\xf5': 'o', '\xf6': 'o', '\xf8': 'o', - '\xd9': 'U', '\xda': 'U', '\xdb': 'U', '\xdc': 'U', - '\xf9': 'u', '\xfa': 'u', '\xfb': 'u', '\xfc': 'u', - '\xdd': 'Y', '\xfd': 'y', '\xff': 'y', - '\xc6': 'Ae', '\xe6': 'ae', - '\xde': 'Th', '\xfe': 'th', - '\xdf': 'ss', - // Latin Extended-A block. 
- '\u0100': 'A', '\u0102': 'A', '\u0104': 'A', - '\u0101': 'a', '\u0103': 'a', '\u0105': 'a', - '\u0106': 'C', '\u0108': 'C', '\u010a': 'C', '\u010c': 'C', - '\u0107': 'c', '\u0109': 'c', '\u010b': 'c', '\u010d': 'c', - '\u010e': 'D', '\u0110': 'D', '\u010f': 'd', '\u0111': 'd', - '\u0112': 'E', '\u0114': 'E', '\u0116': 'E', '\u0118': 'E', '\u011a': 'E', - '\u0113': 'e', '\u0115': 'e', '\u0117': 'e', '\u0119': 'e', '\u011b': 'e', - '\u011c': 'G', '\u011e': 'G', '\u0120': 'G', '\u0122': 'G', - '\u011d': 'g', '\u011f': 'g', '\u0121': 'g', '\u0123': 'g', - '\u0124': 'H', '\u0126': 'H', '\u0125': 'h', '\u0127': 'h', - '\u0128': 'I', '\u012a': 'I', '\u012c': 'I', '\u012e': 'I', '\u0130': 'I', - '\u0129': 'i', '\u012b': 'i', '\u012d': 'i', '\u012f': 'i', '\u0131': 'i', - '\u0134': 'J', '\u0135': 'j', - '\u0136': 'K', '\u0137': 'k', '\u0138': 'k', - '\u0139': 'L', '\u013b': 'L', '\u013d': 'L', '\u013f': 'L', '\u0141': 'L', - '\u013a': 'l', '\u013c': 'l', '\u013e': 'l', '\u0140': 'l', '\u0142': 'l', - '\u0143': 'N', '\u0145': 'N', '\u0147': 'N', '\u014a': 'N', - '\u0144': 'n', '\u0146': 'n', '\u0148': 'n', '\u014b': 'n', - '\u014c': 'O', '\u014e': 'O', '\u0150': 'O', - '\u014d': 'o', '\u014f': 'o', '\u0151': 'o', - '\u0154': 'R', '\u0156': 'R', '\u0158': 'R', - '\u0155': 'r', '\u0157': 'r', '\u0159': 'r', - '\u015a': 'S', '\u015c': 'S', '\u015e': 'S', '\u0160': 'S', - '\u015b': 's', '\u015d': 's', '\u015f': 's', '\u0161': 's', - '\u0162': 'T', '\u0164': 'T', '\u0166': 'T', - '\u0163': 't', '\u0165': 't', '\u0167': 't', - '\u0168': 'U', '\u016a': 'U', '\u016c': 'U', '\u016e': 'U', '\u0170': 'U', '\u0172': 'U', - '\u0169': 'u', '\u016b': 'u', '\u016d': 'u', '\u016f': 'u', '\u0171': 'u', '\u0173': 'u', - '\u0174': 'W', '\u0175': 'w', - '\u0176': 'Y', '\u0177': 'y', '\u0178': 'Y', - '\u0179': 'Z', '\u017b': 'Z', '\u017d': 'Z', - '\u017a': 'z', '\u017c': 'z', '\u017e': 'z', - '\u0132': 'IJ', '\u0133': 'ij', - '\u0152': 'Oe', '\u0153': 'oe', - '\u0149': "'n", '\u017f': 'ss' -}; - -/** Detect free variable `global` from Node.js. */ -var freeGlobal = typeof commonjsGlobal == 'object' && commonjsGlobal && commonjsGlobal.Object === Object && commonjsGlobal; - -/** Detect free variable `self`. */ -var freeSelf = typeof self == 'object' && self && self.Object === Object && self; - -/** Used as a reference to the global object. */ -var root$2 = freeGlobal || freeSelf || Function('return this')(); - -/** - * A specialized version of `_.reduce` for arrays without support for - * iteratee shorthands. - * - * @private - * @param {Array} [array] The array to iterate over. - * @param {Function} iteratee The function invoked per iteration. - * @param {*} [accumulator] The initial value. - * @param {boolean} [initAccum] Specify using the first element of `array` as - * the initial value. - * @returns {*} Returns the accumulated value. - */ -function arrayReduce(array, iteratee, accumulator, initAccum) { - var index = -1, - length = array ? array.length : 0; - - if (initAccum && length) { - accumulator = array[++index]; - } - while (++index < length) { - accumulator = iteratee(accumulator, array[index], index, array); - } - return accumulator; -} - -/** - * Converts an ASCII `string` to an array. - * - * @private - * @param {string} string The string to convert. - * @returns {Array} Returns the converted array. - */ -function asciiToArray(string) { - return string.split(''); -} - -/** - * Splits an ASCII `string` into an array of its words. - * - * @private - * @param {string} The string to inspect. 
- * @returns {Array} Returns the words of `string`. - */ -function asciiWords(string) { - return string.match(reAsciiWord) || []; -} - -/** - * The base implementation of `_.propertyOf` without support for deep paths. - * - * @private - * @param {Object} object The object to query. - * @returns {Function} Returns the new accessor function. - */ -function basePropertyOf(object) { - return function(key) { - return object == null ? undefined : object[key]; - }; -} - -/** - * Used by `_.deburr` to convert Latin-1 Supplement and Latin Extended-A - * letters to basic Latin letters. - * - * @private - * @param {string} letter The matched letter to deburr. - * @returns {string} Returns the deburred letter. - */ -var deburrLetter = basePropertyOf(deburredLetters); - -/** - * Checks if `string` contains Unicode symbols. - * - * @private - * @param {string} string The string to inspect. - * @returns {boolean} Returns `true` if a symbol is found, else `false`. - */ -function hasUnicode(string) { - return reHasUnicode.test(string); -} - -/** - * Checks if `string` contains a word composed of Unicode symbols. - * - * @private - * @param {string} string The string to inspect. - * @returns {boolean} Returns `true` if a word is found, else `false`. - */ -function hasUnicodeWord(string) { - return reHasUnicodeWord.test(string); -} - -/** - * Converts `string` to an array. - * - * @private - * @param {string} string The string to convert. - * @returns {Array} Returns the converted array. - */ -function stringToArray(string) { - return hasUnicode(string) - ? unicodeToArray(string) - : asciiToArray(string); -} - -/** - * Converts a Unicode `string` to an array. - * - * @private - * @param {string} string The string to convert. - * @returns {Array} Returns the converted array. - */ -function unicodeToArray(string) { - return string.match(reUnicode) || []; -} - -/** - * Splits a Unicode `string` into an array of its words. - * - * @private - * @param {string} The string to inspect. - * @returns {Array} Returns the words of `string`. - */ -function unicodeWords(string) { - return string.match(reUnicodeWord) || []; -} - -/** Used for built-in method references. */ -var objectProto = Object.prototype; - -/** - * Used to resolve the - * [`toStringTag`](http://ecma-international.org/ecma-262/7.0/#sec-object.prototype.tostring) - * of values. - */ -var objectToString = objectProto.toString; - -/** Built-in value references. */ -var Symbol$1 = root$2.Symbol; - -/** Used to convert symbols to primitives and strings. */ -var symbolProto = Symbol$1 ? Symbol$1.prototype : undefined, - symbolToString = symbolProto ? symbolProto.toString : undefined; - -/** - * The base implementation of `_.slice` without an iteratee call guard. - * - * @private - * @param {Array} array The array to slice. - * @param {number} [start=0] The start position. - * @param {number} [end=array.length] The end position. - * @returns {Array} Returns the slice of `array`. - */ -function baseSlice(array, start, end) { - var index = -1, - length = array.length; - - if (start < 0) { - start = -start > length ? 0 : (length + start); - } - end = end > length ? length : end; - if (end < 0) { - end += length; - } - length = start > end ? 0 : ((end - start) >>> 0); - start >>>= 0; - - var result = Array(length); - while (++index < length) { - result[index] = array[index + start]; - } - return result; -} - -/** - * The base implementation of `_.toString` which doesn't convert nullish - * values to empty strings. 
- * - * @private - * @param {*} value The value to process. - * @returns {string} Returns the string. - */ -function baseToString(value) { - // Exit early for strings to avoid a performance hit in some environments. - if (typeof value == 'string') { - return value; - } - if (isSymbol(value)) { - return symbolToString ? symbolToString.call(value) : ''; - } - var result = (value + ''); - return (result == '0' && (1 / value) == -INFINITY) ? '-0' : result; -} - -/** - * Casts `array` to a slice if it's needed. - * - * @private - * @param {Array} array The array to inspect. - * @param {number} start The start position. - * @param {number} [end=array.length] The end position. - * @returns {Array} Returns the cast slice. - */ -function castSlice(array, start, end) { - var length = array.length; - end = end === undefined ? length : end; - return (!start && end >= length) ? array : baseSlice(array, start, end); -} - -/** - * Creates a function like `_.lowerFirst`. - * - * @private - * @param {string} methodName The name of the `String` case method to use. - * @returns {Function} Returns the new case function. - */ -function createCaseFirst(methodName) { - return function(string) { - string = toString(string); - - var strSymbols = hasUnicode(string) - ? stringToArray(string) - : undefined; - - var chr = strSymbols - ? strSymbols[0] - : string.charAt(0); - - var trailing = strSymbols - ? castSlice(strSymbols, 1).join('') - : string.slice(1); - - return chr[methodName]() + trailing; - }; -} - -/** - * Creates a function like `_.camelCase`. - * - * @private - * @param {Function} callback The function to combine each word. - * @returns {Function} Returns the new compounder function. - */ -function createCompounder(callback) { - return function(string) { - return arrayReduce(words(deburr(string).replace(reApos, '')), callback, ''); - }; -} - -/** - * Checks if `value` is object-like. A value is object-like if it's not `null` - * and has a `typeof` result of "object". - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is object-like, else `false`. - * @example - * - * _.isObjectLike({}); - * // => true - * - * _.isObjectLike([1, 2, 3]); - * // => true - * - * _.isObjectLike(_.noop); - * // => false - * - * _.isObjectLike(null); - * // => false - */ -function isObjectLike(value) { - return !!value && typeof value == 'object'; -} - -/** - * Checks if `value` is classified as a `Symbol` primitive or object. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a symbol, else `false`. - * @example - * - * _.isSymbol(Symbol.iterator); - * // => true - * - * _.isSymbol('abc'); - * // => false - */ -function isSymbol(value) { - return typeof value == 'symbol' || - (isObjectLike(value) && objectToString.call(value) == symbolTag); -} - -/** - * Converts `value` to a string. An empty string is returned for `null` - * and `undefined` values. The sign of `-0` is preserved. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to process. - * @returns {string} Returns the string. - * @example - * - * _.toString(null); - * // => '' - * - * _.toString(-0); - * // => '-0' - * - * _.toString([1, 2, 3]); - * // => '1,2,3' - */ -function toString(value) { - return value == null ? 
'' : baseToString(value); -} - -/** - * Converts `string` to [camel case](https://en.wikipedia.org/wiki/CamelCase). - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category String - * @param {string} [string=''] The string to convert. - * @returns {string} Returns the camel cased string. - * @example - * - * _.camelCase('Foo Bar'); - * // => 'fooBar' - * - * _.camelCase('--foo-bar--'); - * // => 'fooBar' - * - * _.camelCase('__FOO_BAR__'); - * // => 'fooBar' - */ -var camelCase = createCompounder(function(result, word, index) { - word = word.toLowerCase(); - return result + (index ? capitalize(word) : word); -}); - -/** - * Converts the first character of `string` to upper case and the remaining - * to lower case. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category String - * @param {string} [string=''] The string to capitalize. - * @returns {string} Returns the capitalized string. - * @example - * - * _.capitalize('FRED'); - * // => 'Fred' - */ -function capitalize(string) { - return upperFirst(toString(string).toLowerCase()); -} - -/** - * Deburrs `string` by converting - * [Latin-1 Supplement](https://en.wikipedia.org/wiki/Latin-1_Supplement_(Unicode_block)#Character_table) - * and [Latin Extended-A](https://en.wikipedia.org/wiki/Latin_Extended-A) - * letters to basic Latin letters and removing - * [combining diacritical marks](https://en.wikipedia.org/wiki/Combining_Diacritical_Marks). - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category String - * @param {string} [string=''] The string to deburr. - * @returns {string} Returns the deburred string. - * @example - * - * _.deburr('déjà vu'); - * // => 'deja vu' - */ -function deburr(string) { - string = toString(string); - return string && string.replace(reLatin, deburrLetter).replace(reComboMark, ''); -} - -/** - * Converts the first character of `string` to upper case. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category String - * @param {string} [string=''] The string to convert. - * @returns {string} Returns the converted string. - * @example - * - * _.upperFirst('fred'); - * // => 'Fred' - * - * _.upperFirst('FRED'); - * // => 'FRED' - */ -var upperFirst = createCaseFirst('toUpperCase'); - -/** - * Splits `string` into an array of its words. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category String - * @param {string} [string=''] The string to inspect. - * @param {RegExp|string} [pattern] The pattern to match words. - * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. - * @returns {Array} Returns the words of `string`. - * @example - * - * _.words('fred, barney, & pebbles'); - * // => ['fred', 'barney', 'pebbles'] - * - * _.words('fred, barney, & pebbles', /[^, ]+/g); - * // => ['fred', 'barney', '&', 'pebbles'] - */ -function words(string, pattern, guard) { - string = toString(string); - pattern = guard ? undefined : pattern; - - if (pattern === undefined) { - return hasUnicodeWord(string) ? unicodeWords(string) : asciiWords(string); - } - return string.match(pattern) || []; -} - -var lodash_camelcase = camelCase; - -Object.defineProperty(localsConvention, "__esModule", { - value: true -}); -localsConvention.makeLocalsConventionReducer = makeLocalsConventionReducer; - -var _lodash = _interopRequireDefault$5(lodash_camelcase); - -function _interopRequireDefault$5(obj) { return obj && obj.__esModule ? 
obj : { default: obj }; } - -function dashesCamelCase(string) { - return string.replace(/-+(\w)/g, (_, firstLetter) => firstLetter.toUpperCase()); -} - -function makeLocalsConventionReducer(localsConvention, inputFile) { - const isFunc = typeof localsConvention === "function"; - return (tokens, [className, value]) => { - if (isFunc) { - const convention = localsConvention(className, value, inputFile); - tokens[convention] = value; - return tokens; - } - - switch (localsConvention) { - case "camelCase": - tokens[className] = value; - tokens[(0, _lodash.default)(className)] = value; - break; - - case "camelCaseOnly": - tokens[(0, _lodash.default)(className)] = value; - break; - - case "dashes": - tokens[className] = value; - tokens[dashesCamelCase(className)] = value; - break; - - case "dashesOnly": - tokens[dashesCamelCase(className)] = value; - break; - } - - return tokens; - }; -} - -var FileSystemLoader$1 = {}; - -Object.defineProperty(FileSystemLoader$1, "__esModule", { - value: true -}); -FileSystemLoader$1.default = void 0; - -var _postcss$1 = _interopRequireDefault$4(postcss$1); - -var _path = _interopRequireDefault$4(require$$0$4); - -var _Parser$1 = _interopRequireDefault$4(Parser$1); - -var _fs$1 = fs; - -function _interopRequireDefault$4(obj) { return obj && obj.__esModule ? obj : { default: obj }; } - -// Initially copied from https://github.com/css-modules/css-modules-loader-core -class Core { - constructor(plugins) { - this.plugins = plugins || Core.defaultPlugins; - } - - async load(sourceString, sourcePath, trace, pathFetcher) { - const parser = new _Parser$1.default(pathFetcher, trace); - const plugins = this.plugins.concat([parser.plugin()]); - const result = await (0, _postcss$1.default)(plugins).process(sourceString, { - from: sourcePath - }); - return { - injectableSource: result.css, - exportTokens: parser.exportTokens - }; - } - -} // Sorts dependencies in the following way: -// AAA comes before AA and A -// AB comes after AA and before A -// All Bs come after all As -// This ensures that the files are always returned in the following order: -// - In the order they were required, except -// - After all their dependencies - - -const traceKeySorter = (a, b) => { - if (a.length < b.length) { - return a < b.substring(0, a.length) ? -1 : 1; - } - - if (a.length > b.length) { - return a.substring(0, b.length) <= b ? -1 : 1; - } - - return a < b ? -1 : 1; -}; - -class FileSystemLoader { - constructor(root, plugins, fileResolve) { - if (root === "/" && process.platform === "win32") { - const cwdDrive = process.cwd().slice(0, 3); - - if (!/^[A-Za-z]:\\$/.test(cwdDrive)) { - throw new Error(`Failed to obtain root from "${process.cwd()}".`); - } - - root = cwdDrive; - } - - this.root = root; - this.fileResolve = fileResolve; - this.sources = {}; - this.traces = {}; - this.importNr = 0; - this.core = new Core(plugins); - this.tokensByFile = {}; - this.fs = (0, _fs$1.getFileSystem)(); - } - - async fetch(_newPath, relativeTo, _trace) { - const newPath = _newPath.replace(/^["']|["']$/g, ""); - - const trace = _trace || String.fromCharCode(this.importNr++); - - const useFileResolve = typeof this.fileResolve === "function"; - const fileResolvedPath = useFileResolve ? 
await this.fileResolve(newPath, relativeTo) : await Promise.resolve(); - - if (fileResolvedPath && !_path.default.isAbsolute(fileResolvedPath)) { - throw new Error('The returned path from the "fileResolve" option must be absolute.'); - } - - const relativeDir = _path.default.dirname(relativeTo); - - const rootRelativePath = fileResolvedPath || _path.default.resolve(relativeDir, newPath); - - let fileRelativePath = fileResolvedPath || _path.default.resolve(_path.default.resolve(this.root, relativeDir), newPath); // if the path is not relative or absolute, try to resolve it in node_modules - - - if (!useFileResolve && newPath[0] !== "." && !_path.default.isAbsolute(newPath)) { - try { - fileRelativePath = require.resolve(newPath); - } catch (e) {// noop - } - } - - const tokens = this.tokensByFile[fileRelativePath]; - if (tokens) return tokens; - return new Promise((resolve, reject) => { - this.fs.readFile(fileRelativePath, "utf-8", async (err, source) => { - if (err) reject(err); - const { - injectableSource, - exportTokens - } = await this.core.load(source, rootRelativePath, trace, this.fetch.bind(this)); - this.sources[fileRelativePath] = injectableSource; - this.traces[trace] = fileRelativePath; - this.tokensByFile[fileRelativePath] = exportTokens; - resolve(exportTokens); - }); - }); - } - - get finalSource() { - const traces = this.traces; - const sources = this.sources; - let written = new Set(); - return Object.keys(traces).sort(traceKeySorter).map(key => { - const filename = traces[key]; - - if (written.has(filename)) { - return null; - } - - written.add(filename); - return sources[filename]; - }).join(""); - } - -} - -FileSystemLoader$1.default = FileSystemLoader; - -var scoping = {}; - -var src$3 = {exports: {}}; - -const PERMANENT_MARKER = 2; -const TEMPORARY_MARKER = 1; - -function createError(node, graph) { - const er = new Error("Nondeterministic import's order"); - - const related = graph[node]; - const relatedNode = related.find( - (relatedNode) => graph[relatedNode].indexOf(node) > -1 - ); - - er.nodes = [node, relatedNode]; - - return er; -} - -function walkGraph(node, graph, state, result, strict) { - if (state[node] === PERMANENT_MARKER) { - return; - } - - if (state[node] === TEMPORARY_MARKER) { - if (strict) { - return createError(node, graph); - } - - return; - } - - state[node] = TEMPORARY_MARKER; - - const children = graph[node]; - const length = children.length; - - for (let i = 0; i < length; ++i) { - const error = walkGraph(children[i], graph, state, result, strict); - - if (error instanceof Error) { - return error; - } - } - - state[node] = PERMANENT_MARKER; - - result.push(node); -} - -function topologicalSort$1(graph, strict) { - const result = []; - const state = {}; - - const nodes = Object.keys(graph); - const length = nodes.length; - - for (let i = 0; i < length; ++i) { - const er = walkGraph(nodes[i], graph, state, result, strict); - - if (er instanceof Error) { - return er; - } - } - - return result; -} - -var topologicalSort_1 = topologicalSort$1; - -const topologicalSort = topologicalSort_1; - -const matchImports$1 = /^(.+?)\s+from\s+(?:"([^"]+)"|'([^']+)'|(global))$/; -const icssImport = /^:import\((?:"([^"]+)"|'([^']+)')\)/; - -const VISITED_MARKER = 1; - -/** - * :import('G') {} - * - * Rule - * composes: ... from 'A' - * composes: ... from 'B' - - * Rule - * composes: ... from 'A' - * composes: ... from 'A' - * composes: ... 
from 'C' - * - * Results in: - * - * graph: { - * G: [], - * A: [], - * B: ['A'], - * C: ['A'], - * } - */ -function addImportToGraph(importId, parentId, graph, visited) { - const siblingsId = parentId + "_" + "siblings"; - const visitedId = parentId + "_" + importId; - - if (visited[visitedId] !== VISITED_MARKER) { - if (!Array.isArray(visited[siblingsId])) { - visited[siblingsId] = []; - } - - const siblings = visited[siblingsId]; - - if (Array.isArray(graph[importId])) { - graph[importId] = graph[importId].concat(siblings); - } else { - graph[importId] = siblings.slice(); - } - - visited[visitedId] = VISITED_MARKER; - - siblings.push(importId); - } -} - -src$3.exports = (options = {}) => { - let importIndex = 0; - const createImportedName = - typeof options.createImportedName !== "function" - ? (importName /*, path*/) => - `i__imported_${importName.replace(/\W/g, "_")}_${importIndex++}` - : options.createImportedName; - const failOnWrongOrder = options.failOnWrongOrder; - - return { - postcssPlugin: "postcss-modules-extract-imports", - prepare() { - const graph = {}; - const visited = {}; - const existingImports = {}; - const importDecls = {}; - const imports = {}; - - return { - Once(root, postcss) { - // Check the existing imports order and save refs - root.walkRules((rule) => { - const matches = icssImport.exec(rule.selector); - - if (matches) { - const [, /*match*/ doubleQuotePath, singleQuotePath] = matches; - const importPath = doubleQuotePath || singleQuotePath; - - addImportToGraph(importPath, "root", graph, visited); - - existingImports[importPath] = rule; - } - }); - - root.walkDecls(/^composes$/, (declaration) => { - const matches = declaration.value.match(matchImports$1); - - if (!matches) { - return; - } - - let tmpSymbols; - let [ - , - /*match*/ symbols, - doubleQuotePath, - singleQuotePath, - global, - ] = matches; - - if (global) { - // Composing globals simply means changing these classes to wrap them in global(name) - tmpSymbols = symbols.split(/\s+/).map((s) => `global(${s})`); - } else { - const importPath = doubleQuotePath || singleQuotePath; - - let parent = declaration.parent; - let parentIndexes = ""; - - while (parent.type !== "root") { - parentIndexes = - parent.parent.index(parent) + "_" + parentIndexes; - parent = parent.parent; - } - - const { selector } = declaration.parent; - const parentRule = `_${parentIndexes}${selector}`; - - addImportToGraph(importPath, parentRule, graph, visited); - - importDecls[importPath] = declaration; - imports[importPath] = imports[importPath] || {}; - - tmpSymbols = symbols.split(/\s+/).map((s) => { - if (!imports[importPath][s]) { - imports[importPath][s] = createImportedName(s, importPath); - } - - return imports[importPath][s]; - }); - } - - declaration.value = tmpSymbols.join(" "); - }); - - const importsOrder = topologicalSort(graph, failOnWrongOrder); - - if (importsOrder instanceof Error) { - const importPath = importsOrder.nodes.find((importPath) => - // eslint-disable-next-line no-prototype-builtins - importDecls.hasOwnProperty(importPath) - ); - const decl = importDecls[importPath]; - - throw decl.error( - "Failed to resolve order of composed modules " + - importsOrder.nodes - .map((importPath) => "`" + importPath + "`") - .join(", ") + - ".", - { - plugin: "postcss-modules-extract-imports", - word: "composes", - } - ); - } - - let lastImportRule; - - importsOrder.forEach((path) => { - const importedSymbols = imports[path]; - let rule = existingImports[path]; - - if (!rule && importedSymbols) { - rule = 
postcss.rule({ - selector: `:import("${path}")`, - raws: { after: "\n" }, - }); - - if (lastImportRule) { - root.insertAfter(lastImportRule, rule); - } else { - root.prepend(rule); - } - } - - lastImportRule = rule; - - if (!importedSymbols) { - return; - } - - Object.keys(importedSymbols).forEach((importedSymbol) => { - rule.append( - postcss.decl({ - value: importedSymbol, - prop: importedSymbols[importedSymbol], - raws: { before: "\n " }, - }) - ); - }); - }); - }, - }; - }, - }; -}; - -src$3.exports.postcss = true; - -var srcExports$2 = src$3.exports; - -var wasmHash = {exports: {}}; - -/* - MIT License http://www.opensource.org/licenses/mit-license.php - Author Tobias Koppers @sokra -*/ - -var hasRequiredWasmHash; - -function requireWasmHash () { - if (hasRequiredWasmHash) return wasmHash.exports; - hasRequiredWasmHash = 1; - - // 65536 is the size of a wasm memory page - // 64 is the maximum chunk size for every possible wasm hash implementation - // 4 is the maximum number of bytes per char for string encoding (max is utf-8) - // ~3 makes sure that it's always a block of 4 chars, so avoid partially encoded bytes for base64 - const MAX_SHORT_STRING = Math.floor((65536 - 64) / 4) & ~3; - - class WasmHash { - /** - * @param {WebAssembly.Instance} instance wasm instance - * @param {WebAssembly.Instance[]} instancesPool pool of instances - * @param {number} chunkSize size of data chunks passed to wasm - * @param {number} digestSize size of digest returned by wasm - */ - constructor(instance, instancesPool, chunkSize, digestSize) { - const exports = /** @type {any} */ (instance.exports); - - exports.init(); - - this.exports = exports; - this.mem = Buffer.from(exports.memory.buffer, 0, 65536); - this.buffered = 0; - this.instancesPool = instancesPool; - this.chunkSize = chunkSize; - this.digestSize = digestSize; - } - - reset() { - this.buffered = 0; - this.exports.init(); - } - - /** - * @param {Buffer | string} data data - * @param {BufferEncoding=} encoding encoding - * @returns {this} itself - */ - update(data, encoding) { - if (typeof data === "string") { - while (data.length > MAX_SHORT_STRING) { - this._updateWithShortString(data.slice(0, MAX_SHORT_STRING), encoding); - data = data.slice(MAX_SHORT_STRING); - } - - this._updateWithShortString(data, encoding); - - return this; - } - - this._updateWithBuffer(data); - - return this; - } - - /** - * @param {string} data data - * @param {BufferEncoding=} encoding encoding - * @returns {void} - */ - _updateWithShortString(data, encoding) { - const { exports, buffered, mem, chunkSize } = this; - - let endPos; - - if (data.length < 70) { - if (!encoding || encoding === "utf-8" || encoding === "utf8") { - endPos = buffered; - for (let i = 0; i < data.length; i++) { - const cc = data.charCodeAt(i); - - if (cc < 0x80) { - mem[endPos++] = cc; - } else if (cc < 0x800) { - mem[endPos] = (cc >> 6) | 0xc0; - mem[endPos + 1] = (cc & 0x3f) | 0x80; - endPos += 2; - } else { - // bail-out for weird chars - endPos += mem.write(data.slice(i), endPos, encoding); - break; - } - } - } else if (encoding === "latin1") { - endPos = buffered; - - for (let i = 0; i < data.length; i++) { - const cc = data.charCodeAt(i); - - mem[endPos++] = cc; - } - } else { - endPos = buffered + mem.write(data, buffered, encoding); - } - } else { - endPos = buffered + mem.write(data, buffered, encoding); - } - - if (endPos < chunkSize) { - this.buffered = endPos; - } else { - const l = endPos & ~(this.chunkSize - 1); - - exports.update(l); - - const newBuffered = endPos - l; - - 
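// `l` is `endPos` rounded down to a whole number of chunks (chunkSize is a
// power of two, so the bitmask works): those bytes were just hashed by the
// wasm `update` call above, and the leftover `newBuffered` bytes are shifted
// to the front of the memory page so they are prepended to the next update.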
this.buffered = newBuffered; - - if (newBuffered > 0) { - mem.copyWithin(0, l, endPos); - } - } - } - - /** - * @param {Buffer} data data - * @returns {void} - */ - _updateWithBuffer(data) { - const { exports, buffered, mem } = this; - const length = data.length; - - if (buffered + length < this.chunkSize) { - data.copy(mem, buffered, 0, length); - - this.buffered += length; - } else { - const l = (buffered + length) & ~(this.chunkSize - 1); - - if (l > 65536) { - let i = 65536 - buffered; - - data.copy(mem, buffered, 0, i); - exports.update(65536); - - const stop = l - buffered - 65536; - - while (i < stop) { - data.copy(mem, 0, i, i + 65536); - exports.update(65536); - i += 65536; - } - - data.copy(mem, 0, i, l - buffered); - - exports.update(l - buffered - i); - } else { - data.copy(mem, buffered, 0, l - buffered); - - exports.update(l); - } - - const newBuffered = length + buffered - l; - - this.buffered = newBuffered; - - if (newBuffered > 0) { - data.copy(mem, 0, length - newBuffered, length); - } - } - } - - digest(type) { - const { exports, buffered, mem, digestSize } = this; - - exports.final(buffered); - - this.instancesPool.push(this); - - const hex = mem.toString("latin1", 0, digestSize); - - if (type === "hex") { - return hex; - } - - if (type === "binary" || !type) { - return Buffer.from(hex, "hex"); - } - - return Buffer.from(hex, "hex").toString(type); - } - } - - const create = (wasmModule, instancesPool, chunkSize, digestSize) => { - if (instancesPool.length > 0) { - const old = instancesPool.pop(); - - old.reset(); - - return old; - } else { - return new WasmHash( - new WebAssembly.Instance(wasmModule), - instancesPool, - chunkSize, - digestSize - ); - } - }; - - wasmHash.exports = create; - wasmHash.exports.MAX_SHORT_STRING = MAX_SHORT_STRING; - return wasmHash.exports; -} - -/* - MIT License http://www.opensource.org/licenses/mit-license.php - Author Tobias Koppers @sokra -*/ - -var xxhash64_1; -var hasRequiredXxhash64; - -function requireXxhash64 () { - if (hasRequiredXxhash64) return xxhash64_1; - hasRequiredXxhash64 = 1; - - const create = requireWasmHash(); - - //#region wasm code: xxhash64 (../../../assembly/hash/xxhash64.asm.ts) --initialMemory 1 - const xxhash64 = new WebAssembly.Module( - Buffer.from( - // 1173 bytes - 
"AGFzbQEAAAABCAJgAX8AYAAAAwQDAQAABQMBAAEGGgV+AUIAC34BQgALfgFCAAt+AUIAC34BQgALByIEBGluaXQAAAZ1cGRhdGUAAQVmaW5hbAACBm1lbW9yeQIACrUIAzAAQtbrgu7q/Yn14AAkAELP1tO+0ser2UIkAUIAJAJC+erQ0OfJoeThACQDQgAkBAvUAQIBfwR+IABFBEAPCyMEIACtfCQEIwAhAiMBIQMjAiEEIwMhBQNAIAIgASkDAELP1tO+0ser2UJ+fEIfiUKHla+vmLbem55/fiECIAMgASkDCELP1tO+0ser2UJ+fEIfiUKHla+vmLbem55/fiEDIAQgASkDEELP1tO+0ser2UJ+fEIfiUKHla+vmLbem55/fiEEIAUgASkDGELP1tO+0ser2UJ+fEIfiUKHla+vmLbem55/fiEFIAAgAUEgaiIBSw0ACyACJAAgAyQBIAQkAiAFJAMLqwYCAX8EfiMEQgBSBH4jACICQgGJIwEiA0IHiXwjAiIEQgyJfCMDIgVCEol8IAJCz9bTvtLHq9lCfkIfiUKHla+vmLbem55/foVCh5Wvr5i23puef35CnaO16oOxjYr6AH0gA0LP1tO+0ser2UJ+Qh+JQoeVr6+Ytt6bnn9+hUKHla+vmLbem55/fkKdo7Xqg7GNivoAfSAEQs/W077Sx6vZQn5CH4lCh5Wvr5i23puef36FQoeVr6+Ytt6bnn9+Qp2jteqDsY2K+gB9IAVCz9bTvtLHq9lCfkIfiUKHla+vmLbem55/foVCh5Wvr5i23puef35CnaO16oOxjYr6AH0FQsXP2bLx5brqJwsjBCAArXx8IQIDQCABQQhqIABNBEAgAiABKQMAQs/W077Sx6vZQn5CH4lCh5Wvr5i23puef36FQhuJQoeVr6+Ytt6bnn9+Qp2jteqDsY2K+gB9IQIgAUEIaiEBDAELCyABQQRqIABNBEACfyACIAE1AgBCh5Wvr5i23puef36FQheJQs/W077Sx6vZQn5C+fPd8Zn2masWfCECIAFBBGoLIQELA0AgACABRwRAIAIgATEAAELFz9my8eW66id+hUILiUKHla+vmLbem55/fiECIAFBAWohAQwBCwtBACACIAJCIYiFQs/W077Sx6vZQn4iAiACQh2IhUL5893xmfaZqxZ+IgIgAkIgiIUiAkIgiCIDQv//A4NCIIYgA0KAgPz/D4NCEIiEIgNC/4GAgPAfg0IQhiADQoD+g4CA4D+DQgiIhCIDQo+AvIDwgcAHg0IIhiADQvCBwIeAnoD4AINCBIiEIgNChoyYsODAgYMGfEIEiEKBgoSIkKDAgAGDQid+IANCsODAgYOGjJgwhHw3AwBBCCACQv////8PgyICQv//A4NCIIYgAkKAgPz/D4NCEIiEIgJC/4GAgPAfg0IQhiACQoD+g4CA4D+DQgiIhCICQo+AvIDwgcAHg0IIhiACQvCBwIeAnoD4AINCBIiEIgJChoyYsODAgYMGfEIEiEKBgoSIkKDAgAGDQid+IAJCsODAgYOGjJgwhHw3AwAL", - "base64" - ) - ); - //#endregion - - xxhash64_1 = create.bind(null, xxhash64, [], 32, 16); - return xxhash64_1; -} - -var BatchedHash_1; -var hasRequiredBatchedHash; - -function requireBatchedHash () { - if (hasRequiredBatchedHash) return BatchedHash_1; - hasRequiredBatchedHash = 1; - const MAX_SHORT_STRING = requireWasmHash().MAX_SHORT_STRING; - - class BatchedHash { - constructor(hash) { - this.string = undefined; - this.encoding = undefined; - this.hash = hash; - } - - /** - * Update hash {@link https://nodejs.org/api/crypto.html#crypto_hash_update_data_inputencoding} - * @param {string|Buffer} data data - * @param {string=} inputEncoding data encoding - * @returns {this} updated hash - */ - update(data, inputEncoding) { - if (this.string !== undefined) { - if ( - typeof data === "string" && - inputEncoding === this.encoding && - this.string.length + data.length < MAX_SHORT_STRING - ) { - this.string += data; - - return this; - } - - this.hash.update(this.string, this.encoding); - this.string = undefined; - } - - if (typeof data === "string") { - if ( - data.length < MAX_SHORT_STRING && - // base64 encoding is not valid since it may contain padding chars - (!inputEncoding || !inputEncoding.startsWith("ba")) - ) { - this.string = data; - this.encoding = inputEncoding; - } else { - this.hash.update(data, inputEncoding); - } - } else { - this.hash.update(data); - } - - return this; - } - - /** - * Calculates the digest {@link https://nodejs.org/api/crypto.html#crypto_hash_digest_encoding} - * @param {string=} encoding encoding of the return value - * @returns {string|Buffer} digest - */ - digest(encoding) { - if (this.string !== undefined) { - this.hash.update(this.string, this.encoding); - } - - return this.hash.digest(encoding); - } - } - - BatchedHash_1 = BatchedHash; - return BatchedHash_1; -} - -/* - MIT License http://www.opensource.org/licenses/mit-license.php - Author Tobias Koppers @sokra -*/ - -var md4_1; -var hasRequiredMd4; - -function 
requireMd4 () { - if (hasRequiredMd4) return md4_1; - hasRequiredMd4 = 1; - - const create = requireWasmHash(); - - //#region wasm code: md4 (../../../assembly/hash/md4.asm.ts) --initialMemory 1 - const md4 = new WebAssembly.Module( - Buffer.from( - // 2150 bytes - "AGFzbQEAAAABCAJgAX8AYAAAAwUEAQAAAAUDAQABBhoFfwFBAAt/AUEAC38BQQALfwFBAAt/AUEACwciBARpbml0AAAGdXBkYXRlAAIFZmluYWwAAwZtZW1vcnkCAAqFEAQmAEGBxpS6BiQBQYnXtv5+JAJB/rnrxXkkA0H2qMmBASQEQQAkAAvMCgEYfyMBIQojAiEGIwMhByMEIQgDQCAAIAVLBEAgBSgCCCINIAcgBiAFKAIEIgsgCCAHIAUoAgAiDCAKIAggBiAHIAhzcXNqakEDdyIDIAYgB3Nxc2pqQQd3IgEgAyAGc3FzampBC3chAiAFKAIUIg8gASACIAUoAhAiCSADIAEgBSgCDCIOIAYgAyACIAEgA3Nxc2pqQRN3IgQgASACc3FzampBA3ciAyACIARzcXNqakEHdyEBIAUoAiAiEiADIAEgBSgCHCIRIAQgAyAFKAIYIhAgAiAEIAEgAyAEc3FzampBC3ciAiABIANzcXNqakETdyIEIAEgAnNxc2pqQQN3IQMgBSgCLCIVIAQgAyAFKAIoIhQgAiAEIAUoAiQiEyABIAIgAyACIARzcXNqakEHdyIBIAMgBHNxc2pqQQt3IgIgASADc3FzampBE3chBCAPIBAgCSAVIBQgEyAFKAI4IhYgAiAEIAUoAjQiFyABIAIgBSgCMCIYIAMgASAEIAEgAnNxc2pqQQN3IgEgAiAEc3FzampBB3ciAiABIARzcXNqakELdyIDIAkgAiAMIAEgBSgCPCIJIAQgASADIAEgAnNxc2pqQRN3IgEgAiADcnEgAiADcXJqakGZ84nUBWpBA3ciAiABIANycSABIANxcmpqQZnzidQFakEFdyIEIAEgAnJxIAEgAnFyaiASakGZ84nUBWpBCXciAyAPIAQgCyACIBggASADIAIgBHJxIAIgBHFyampBmfOJ1AVqQQ13IgEgAyAEcnEgAyAEcXJqakGZ84nUBWpBA3ciAiABIANycSABIANxcmpqQZnzidQFakEFdyIEIAEgAnJxIAEgAnFyampBmfOJ1AVqQQl3IgMgECAEIAIgFyABIAMgAiAEcnEgAiAEcXJqakGZ84nUBWpBDXciASADIARycSADIARxcmogDWpBmfOJ1AVqQQN3IgIgASADcnEgASADcXJqakGZ84nUBWpBBXciBCABIAJycSABIAJxcmpqQZnzidQFakEJdyIDIBEgBCAOIAIgFiABIAMgAiAEcnEgAiAEcXJqakGZ84nUBWpBDXciASADIARycSADIARxcmpqQZnzidQFakEDdyICIAEgA3JxIAEgA3FyampBmfOJ1AVqQQV3IgQgASACcnEgASACcXJqakGZ84nUBWpBCXciAyAMIAIgAyAJIAEgAyACIARycSACIARxcmpqQZnzidQFakENdyIBcyAEc2pqQaHX5/YGakEDdyICIAQgASACcyADc2ogEmpBodfn9gZqQQl3IgRzIAFzampBodfn9gZqQQt3IgMgAiADIBggASADIARzIAJzampBodfn9gZqQQ93IgFzIARzaiANakGh1+f2BmpBA3ciAiAUIAQgASACcyADc2pqQaHX5/YGakEJdyIEcyABc2pqQaHX5/YGakELdyIDIAsgAiADIBYgASADIARzIAJzampBodfn9gZqQQ93IgFzIARzampBodfn9gZqQQN3IgIgEyAEIAEgAnMgA3NqakGh1+f2BmpBCXciBHMgAXNqakGh1+f2BmpBC3chAyAKIA4gAiADIBcgASADIARzIAJzampBodfn9gZqQQ93IgFzIARzampBodfn9gZqQQN3IgJqIQogBiAJIAEgESADIAIgFSAEIAEgAnMgA3NqakGh1+f2BmpBCXciBHMgAXNqakGh1+f2BmpBC3ciAyAEcyACc2pqQaHX5/YGakEPd2ohBiADIAdqIQcgBCAIaiEIIAVBQGshBQwBCwsgCiQBIAYkAiAHJAMgCCQECw0AIAAQASMAIABqJAAL/wQCA38BfiMAIABqrUIDhiEEIABByABqQUBxIgJBCGshAyAAIgFBAWohACABQYABOgAAA0AgACACSUEAIABBB3EbBEAgAEEAOgAAIABBAWohAAwBCwsDQCAAIAJJBEAgAEIANwMAIABBCGohAAwBCwsgAyAENwMAIAIQAUEAIwGtIgRC//8DgyAEQoCA/P8Pg0IQhoQiBEL/gYCA8B+DIARCgP6DgIDgP4NCCIaEIgRCj4C8gPCBwAeDQgiGIARC8IHAh4CegPgAg0IEiIQiBEKGjJiw4MCBgwZ8QgSIQoGChIiQoMCAAYNCJ34gBEKw4MCBg4aMmDCEfDcDAEEIIwKtIgRC//8DgyAEQoCA/P8Pg0IQhoQiBEL/gYCA8B+DIARCgP6DgIDgP4NCCIaEIgRCj4C8gPCBwAeDQgiGIARC8IHAh4CegPgAg0IEiIQiBEKGjJiw4MCBgwZ8QgSIQoGChIiQoMCAAYNCJ34gBEKw4MCBg4aMmDCEfDcDAEEQIwOtIgRC//8DgyAEQoCA/P8Pg0IQhoQiBEL/gYCA8B+DIARCgP6DgIDgP4NCCIaEIgRCj4C8gPCBwAeDQgiGIARC8IHAh4CegPgAg0IEiIQiBEKGjJiw4MCBgwZ8QgSIQoGChIiQoMCAAYNCJ34gBEKw4MCBg4aMmDCEfDcDAEEYIwStIgRC//8DgyAEQoCA/P8Pg0IQhoQiBEL/gYCA8B+DIARCgP6DgIDgP4NCCIaEIgRCj4C8gPCBwAeDQgiGIARC8IHAh4CegPgAg0IEiIQiBEKGjJiw4MCBgwZ8QgSIQoGChIiQoMCAAYNCJ34gBEKw4MCBg4aMmDCEfDcDAAs=", - "base64" - ) - ); - //#endregion - - md4_1 = create.bind(null, md4, [], 64, 32); - return md4_1; -} - -var BulkUpdateDecorator_1; -var hasRequiredBulkUpdateDecorator; - -function requireBulkUpdateDecorator () { - if (hasRequiredBulkUpdateDecorator) return BulkUpdateDecorator_1; - hasRequiredBulkUpdateDecorator = 1; - const BULK_SIZE = 2000; - - // We are using an object instead of a Map as this will stay static 
during the runtime - // so access to it can be optimized by v8 - const digestCaches = {}; - - class BulkUpdateDecorator { - /** - * @param {Hash | function(): Hash} hashOrFactory function to create a hash - * @param {string=} hashKey key for caching - */ - constructor(hashOrFactory, hashKey) { - this.hashKey = hashKey; - - if (typeof hashOrFactory === "function") { - this.hashFactory = hashOrFactory; - this.hash = undefined; - } else { - this.hashFactory = undefined; - this.hash = hashOrFactory; - } - - this.buffer = ""; - } - - /** - * Update hash {@link https://nodejs.org/api/crypto.html#crypto_hash_update_data_inputencoding} - * @param {string|Buffer} data data - * @param {string=} inputEncoding data encoding - * @returns {this} updated hash - */ - update(data, inputEncoding) { - if ( - inputEncoding !== undefined || - typeof data !== "string" || - data.length > BULK_SIZE - ) { - if (this.hash === undefined) { - this.hash = this.hashFactory(); - } - - if (this.buffer.length > 0) { - this.hash.update(this.buffer); - this.buffer = ""; - } - - this.hash.update(data, inputEncoding); - } else { - this.buffer += data; - - if (this.buffer.length > BULK_SIZE) { - if (this.hash === undefined) { - this.hash = this.hashFactory(); - } - - this.hash.update(this.buffer); - this.buffer = ""; - } - } - - return this; - } - - /** - * Calculates the digest {@link https://nodejs.org/api/crypto.html#crypto_hash_digest_encoding} - * @param {string=} encoding encoding of the return value - * @returns {string|Buffer} digest - */ - digest(encoding) { - let digestCache; - - const buffer = this.buffer; - - if (this.hash === undefined) { - // short data for hash, we can use caching - const cacheKey = `${this.hashKey}-${encoding}`; - - digestCache = digestCaches[cacheKey]; - - if (digestCache === undefined) { - digestCache = digestCaches[cacheKey] = new Map(); - } - - const cacheEntry = digestCache.get(buffer); - - if (cacheEntry !== undefined) { - return cacheEntry; - } - - this.hash = this.hashFactory(); - } - - if (buffer.length > 0) { - this.hash.update(buffer); - } - - const digestResult = this.hash.digest(encoding); - - if (digestCache !== undefined) { - digestCache.set(buffer, digestResult); - } - - return digestResult; - } - } - - BulkUpdateDecorator_1 = BulkUpdateDecorator; - return BulkUpdateDecorator_1; -} - -const baseEncodeTables = { - 26: "abcdefghijklmnopqrstuvwxyz", - 32: "123456789abcdefghjkmnpqrstuvwxyz", // no 0lio - 36: "0123456789abcdefghijklmnopqrstuvwxyz", - 49: "abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ", // no lIO - 52: "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ", - 58: "123456789abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ", // no 0lIO - 62: "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ", - 64: "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-_", -}; - -/** - * @param {Uint32Array} uint32Array Treated as a long base-0x100000000 number, little endian - * @param {number} divisor The divisor - * @return {number} Modulo (remainder) of the division - */ -function divmod32(uint32Array, divisor) { - let carry = 0; - for (let i = uint32Array.length - 1; i >= 0; i--) { - const value = carry * 0x100000000 + uint32Array[i]; - carry = value % divisor; - uint32Array[i] = Math.floor(value / divisor); - } - return carry; -} - -function encodeBufferToBase(buffer, base, length) { - const encodeTable = baseEncodeTables[base]; - - if (!encodeTable) { - throw new Error("Unknown encoding base" + base); - } - - // Input bits are only enough to generate 
this many characters - const limit = Math.ceil((buffer.length * 8) / Math.log2(base)); - length = Math.min(length, limit); - - // Most of the crypto digests (if not all) has length a multiple of 4 bytes. - // Fewer numbers in the array means faster math. - const uint32Array = new Uint32Array(Math.ceil(buffer.length / 4)); - - // Make sure the input buffer data is copied and is not mutated by reference. - // divmod32() would corrupt the BulkUpdateDecorator cache otherwise. - buffer.copy(Buffer.from(uint32Array.buffer)); - - let output = ""; - - for (let i = 0; i < length; i++) { - output = encodeTable[divmod32(uint32Array, base)] + output; - } - - return output; -} - -let crypto = undefined; -let createXXHash64 = undefined; -let createMd4 = undefined; -let BatchedHash = undefined; -let BulkUpdateDecorator = undefined; - -function getHashDigest$1(buffer, algorithm, digestType, maxLength) { - algorithm = algorithm || "xxhash64"; - maxLength = maxLength || 9999; - - let hash; - - if (algorithm === "xxhash64") { - if (createXXHash64 === undefined) { - createXXHash64 = requireXxhash64(); - - if (BatchedHash === undefined) { - BatchedHash = requireBatchedHash(); - } - } - - hash = new BatchedHash(createXXHash64()); - } else if (algorithm === "md4") { - if (createMd4 === undefined) { - createMd4 = requireMd4(); - - if (BatchedHash === undefined) { - BatchedHash = requireBatchedHash(); - } - } - - hash = new BatchedHash(createMd4()); - } else if (algorithm === "native-md4") { - if (typeof crypto === "undefined") { - crypto = require$$3$1__default; - - if (BulkUpdateDecorator === undefined) { - BulkUpdateDecorator = requireBulkUpdateDecorator(); - } - } - - hash = new BulkUpdateDecorator(() => crypto.createHash("md4"), "md4"); - } else { - if (typeof crypto === "undefined") { - crypto = require$$3$1__default; - - if (BulkUpdateDecorator === undefined) { - BulkUpdateDecorator = requireBulkUpdateDecorator(); - } - } - - hash = new BulkUpdateDecorator( - () => crypto.createHash(algorithm), - algorithm - ); - } - - hash.update(buffer); - - if ( - digestType === "base26" || - digestType === "base32" || - digestType === "base36" || - digestType === "base49" || - digestType === "base52" || - digestType === "base58" || - digestType === "base62" - ) { - return encodeBufferToBase(hash.digest(), digestType.substr(4), maxLength); - } else { - return hash.digest(digestType || "hex").substr(0, maxLength); - } -} - -var getHashDigest_1 = getHashDigest$1; - -const path$1 = require$$0$4; -const getHashDigest = getHashDigest_1; - -function interpolateName$1(loaderContext, name, options = {}) { - let filename; - - const hasQuery = - loaderContext.resourceQuery && loaderContext.resourceQuery.length > 1; - - if (typeof name === "function") { - filename = name( - loaderContext.resourcePath, - hasQuery ? 
loaderContext.resourceQuery : undefined - ); - } else { - filename = name || "[hash].[ext]"; - } - - const context = options.context; - const content = options.content; - const regExp = options.regExp; - - let ext = "bin"; - let basename = "file"; - let directory = ""; - let folder = ""; - let query = ""; - - if (loaderContext.resourcePath) { - const parsed = path$1.parse(loaderContext.resourcePath); - let resourcePath = loaderContext.resourcePath; - - if (parsed.ext) { - ext = parsed.ext.substr(1); - } - - if (parsed.dir) { - basename = parsed.name; - resourcePath = parsed.dir + path$1.sep; - } - - if (typeof context !== "undefined") { - directory = path$1 - .relative(context, resourcePath + "_") - .replace(/\\/g, "/") - .replace(/\.\.(\/)?/g, "_$1"); - directory = directory.substr(0, directory.length - 1); - } else { - directory = resourcePath.replace(/\\/g, "/").replace(/\.\.(\/)?/g, "_$1"); - } - - if (directory.length === 1) { - directory = ""; - } else if (directory.length > 1) { - folder = path$1.basename(directory); - } - } - - if (loaderContext.resourceQuery && loaderContext.resourceQuery.length > 1) { - query = loaderContext.resourceQuery; - - const hashIdx = query.indexOf("#"); - - if (hashIdx >= 0) { - query = query.substr(0, hashIdx); - } - } - - let url = filename; - - if (content) { - // Match hash template - url = url - // `hash` and `contenthash` are same in `loader-utils` context - // let's keep `hash` for backward compatibility - .replace( - /\[(?:([^:\]]+):)?(?:hash|contenthash)(?::([a-z]+\d*))?(?::(\d+))?\]/gi, - (all, hashType, digestType, maxLength) => - getHashDigest(content, hashType, digestType, parseInt(maxLength, 10)) - ); - } - - url = url - .replace(/\[ext\]/gi, () => ext) - .replace(/\[name\]/gi, () => basename) - .replace(/\[path\]/gi, () => directory) - .replace(/\[folder\]/gi, () => folder) - .replace(/\[query\]/gi, () => query); - - if (regExp && loaderContext.resourcePath) { - const match = loaderContext.resourcePath.match(new RegExp(regExp)); - - match && - match.forEach((matched, i) => { - url = url.replace(new RegExp("\\[" + i + "\\]", "ig"), matched); - }); - } - - if ( - typeof loaderContext.options === "object" && - typeof loaderContext.options.customInterpolateName === "function" - ) { - url = loaderContext.options.customInterpolateName.call( - loaderContext, - url, - name, - options - ); - } - - return url; -} - -var interpolateName_1 = interpolateName$1; - -var interpolateName = interpolateName_1; -var path = require$$0$4; - -/** - * @param {string} pattern - * @param {object} options - * @param {string} options.context - * @param {string} options.hashPrefix - * @return {function} - */ -var genericNames = function createGenerator(pattern, options) { - options = options || {}; - var context = - options && typeof options.context === "string" - ? options.context - : process.cwd(); - var hashPrefix = - options && typeof options.hashPrefix === "string" ? 
options.hashPrefix : ""; - - /** - * @param {string} localName Usually a class name - * @param {string} filepath Absolute path - * @return {string} - */ - return function generate(localName, filepath) { - var name = pattern.replace(/\[local\]/gi, localName); - var loaderContext = { - resourcePath: filepath, - }; - - var loaderOptions = { - content: - hashPrefix + - path.relative(context, filepath).replace(/\\/g, "/") + - "\x00" + - localName, - context: context, - }; - - var genericName = interpolateName(loaderContext, name, loaderOptions); - return genericName - .replace(new RegExp("[^a-zA-Z0-9\\-_\u00A0-\uFFFF]", "g"), "-") - .replace(/^((-?[0-9])|--)/, "_$1"); - }; -}; - -var src$2 = {exports: {}}; - -var dist = {exports: {}}; - -var processor = {exports: {}}; - -var parser = {exports: {}}; - -var root$1 = {exports: {}}; - -var container = {exports: {}}; - -var node$1 = {exports: {}}; - -var util = {}; - -var unesc = {exports: {}}; - -(function (module, exports) { - - exports.__esModule = true; - exports["default"] = unesc; - - // Many thanks for this post which made this migration much easier. - // https://mathiasbynens.be/notes/css-escapes - - /** - * - * @param {string} str - * @returns {[string, number]|undefined} - */ - function gobbleHex(str) { - var lower = str.toLowerCase(); - var hex = ''; - var spaceTerminated = false; - - for (var i = 0; i < 6 && lower[i] !== undefined; i++) { - var code = lower.charCodeAt(i); // check to see if we are dealing with a valid hex char [a-f|0-9] - - var valid = code >= 97 && code <= 102 || code >= 48 && code <= 57; // https://drafts.csswg.org/css-syntax/#consume-escaped-code-point - - spaceTerminated = code === 32; - - if (!valid) { - break; - } - - hex += lower[i]; - } - - if (hex.length === 0) { - return undefined; - } - - var codePoint = parseInt(hex, 16); - var isSurrogate = codePoint >= 0xD800 && codePoint <= 0xDFFF; // Add special case for - // "If this number is zero, or is for a surrogate, or is greater than the maximum allowed code point" - // https://drafts.csswg.org/css-syntax/#maximum-allowed-code-point - - if (isSurrogate || codePoint === 0x0000 || codePoint > 0x10FFFF) { - return ["\uFFFD", hex.length + (spaceTerminated ? 1 : 0)]; - } - - return [String.fromCodePoint(codePoint), hex.length + (spaceTerminated ? 1 : 0)]; - } - - var CONTAINS_ESCAPE = /\\/; - - function unesc(str) { - var needToProcess = CONTAINS_ESCAPE.test(str); - - if (!needToProcess) { - return str; - } - - var ret = ""; - - for (var i = 0; i < str.length; i++) { - if (str[i] === "\\") { - var gobbled = gobbleHex(str.slice(i + 1, i + 7)); - - if (gobbled !== undefined) { - ret += gobbled[0]; - i += gobbled[1]; - continue; - } // Retain a pair of \\ if double escaped `\\\\` - // https://github.com/postcss/postcss-selector-parser/commit/268c9a7656fb53f543dc620aa5b73a30ec3ff20e - - - if (str[i + 1] === "\\") { - ret += "\\"; - i++; - continue; - } // if \\ is at the end of the string retain it - // https://github.com/postcss/postcss-selector-parser/commit/01a6b346e3612ce1ab20219acc26abdc259ccefb - - - if (str.length === i + 1) { - ret += str[i]; - } - - continue; - } - - ret += str[i]; - } - - return ret; - } - - module.exports = exports.default; -} (unesc, unesc.exports)); - -var unescExports = unesc.exports; - -var getProp = {exports: {}}; - -(function (module, exports) { - - exports.__esModule = true; - exports["default"] = getProp; - - function getProp(obj) { - for (var _len = arguments.length, props = new Array(_len > 1 ? 
_len - 1 : 0), _key = 1; _key < _len; _key++) { - props[_key - 1] = arguments[_key]; - } - - while (props.length > 0) { - var prop = props.shift(); - - if (!obj[prop]) { - return undefined; - } - - obj = obj[prop]; - } - - return obj; - } - - module.exports = exports.default; -} (getProp, getProp.exports)); - -var getPropExports = getProp.exports; - -var ensureObject = {exports: {}}; - -(function (module, exports) { - - exports.__esModule = true; - exports["default"] = ensureObject; - - function ensureObject(obj) { - for (var _len = arguments.length, props = new Array(_len > 1 ? _len - 1 : 0), _key = 1; _key < _len; _key++) { - props[_key - 1] = arguments[_key]; - } - - while (props.length > 0) { - var prop = props.shift(); - - if (!obj[prop]) { - obj[prop] = {}; - } - - obj = obj[prop]; - } - } - - module.exports = exports.default; -} (ensureObject, ensureObject.exports)); - -var ensureObjectExports = ensureObject.exports; - -var stripComments = {exports: {}}; - -(function (module, exports) { - - exports.__esModule = true; - exports["default"] = stripComments; - - function stripComments(str) { - var s = ""; - var commentStart = str.indexOf("/*"); - var lastEnd = 0; - - while (commentStart >= 0) { - s = s + str.slice(lastEnd, commentStart); - var commentEnd = str.indexOf("*/", commentStart + 2); - - if (commentEnd < 0) { - return s; - } - - lastEnd = commentEnd + 2; - commentStart = str.indexOf("/*", lastEnd); - } - - s = s + str.slice(lastEnd); - return s; - } - - module.exports = exports.default; -} (stripComments, stripComments.exports)); - -var stripCommentsExports = stripComments.exports; - -util.__esModule = true; -util.stripComments = util.ensureObject = util.getProp = util.unesc = void 0; - -var _unesc = _interopRequireDefault$3(unescExports); - -util.unesc = _unesc["default"]; - -var _getProp = _interopRequireDefault$3(getPropExports); - -util.getProp = _getProp["default"]; - -var _ensureObject = _interopRequireDefault$3(ensureObjectExports); - -util.ensureObject = _ensureObject["default"]; - -var _stripComments = _interopRequireDefault$3(stripCommentsExports); - -util.stripComments = _stripComments["default"]; - -function _interopRequireDefault$3(obj) { return obj && obj.__esModule ? 
obj : { "default": obj }; } - -(function (module, exports) { - - exports.__esModule = true; - exports["default"] = void 0; - - var _util = util; - - function _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } - - function _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; } - - var cloneNode = function cloneNode(obj, parent) { - if (typeof obj !== 'object' || obj === null) { - return obj; - } - - var cloned = new obj.constructor(); - - for (var i in obj) { - if (!obj.hasOwnProperty(i)) { - continue; - } - - var value = obj[i]; - var type = typeof value; - - if (i === 'parent' && type === 'object') { - if (parent) { - cloned[i] = parent; - } - } else if (value instanceof Array) { - cloned[i] = value.map(function (j) { - return cloneNode(j, cloned); - }); - } else { - cloned[i] = cloneNode(value, cloned); - } - } - - return cloned; - }; - - var Node = /*#__PURE__*/function () { - function Node(opts) { - if (opts === void 0) { - opts = {}; - } - - Object.assign(this, opts); - this.spaces = this.spaces || {}; - this.spaces.before = this.spaces.before || ''; - this.spaces.after = this.spaces.after || ''; - } - - var _proto = Node.prototype; - - _proto.remove = function remove() { - if (this.parent) { - this.parent.removeChild(this); - } - - this.parent = undefined; - return this; - }; - - _proto.replaceWith = function replaceWith() { - if (this.parent) { - for (var index in arguments) { - this.parent.insertBefore(this, arguments[index]); - } - - this.remove(); - } - - return this; - }; - - _proto.next = function next() { - return this.parent.at(this.parent.index(this) + 1); - }; - - _proto.prev = function prev() { - return this.parent.at(this.parent.index(this) - 1); - }; - - _proto.clone = function clone(overrides) { - if (overrides === void 0) { - overrides = {}; - } - - var cloned = cloneNode(this); - - for (var name in overrides) { - cloned[name] = overrides[name]; - } - - return cloned; - } - /** - * Some non-standard syntax doesn't follow normal escaping rules for css. - * This allows non standard syntax to be appended to an existing property - * by specifying the escaped value. By specifying the escaped value, - * illegal characters are allowed to be directly inserted into css output. - * @param {string} name the property to set - * @param {any} value the unescaped value of the property - * @param {string} valueEscaped optional. the escaped value of the property. - */ - ; - - _proto.appendToPropertyAndEscape = function appendToPropertyAndEscape(name, value, valueEscaped) { - if (!this.raws) { - this.raws = {}; - } - - var originalValue = this[name]; - var originalEscaped = this.raws[name]; - this[name] = originalValue + value; // this may trigger a setter that updates raws, so it has to be set first. - - if (originalEscaped || valueEscaped !== value) { - this.raws[name] = (originalEscaped || originalValue) + valueEscaped; - } else { - delete this.raws[name]; // delete any escaped value that was created by the setter. - } - } - /** - * Some non-standard syntax doesn't follow normal escaping rules for css. 
- * This allows the escaped value to be specified directly, allowing illegal - * characters to be directly inserted into css output. - * @param {string} name the property to set - * @param {any} value the unescaped value of the property - * @param {string} valueEscaped the escaped value of the property. - */ - ; - - _proto.setPropertyAndEscape = function setPropertyAndEscape(name, value, valueEscaped) { - if (!this.raws) { - this.raws = {}; - } - - this[name] = value; // this may trigger a setter that updates raws, so it has to be set first. - - this.raws[name] = valueEscaped; - } - /** - * When you want a value to passed through to CSS directly. This method - * deletes the corresponding raw value causing the stringifier to fallback - * to the unescaped value. - * @param {string} name the property to set. - * @param {any} value The value that is both escaped and unescaped. - */ - ; - - _proto.setPropertyWithoutEscape = function setPropertyWithoutEscape(name, value) { - this[name] = value; // this may trigger a setter that updates raws, so it has to be set first. - - if (this.raws) { - delete this.raws[name]; - } - } - /** - * - * @param {number} line The number (starting with 1) - * @param {number} column The column number (starting with 1) - */ - ; - - _proto.isAtPosition = function isAtPosition(line, column) { - if (this.source && this.source.start && this.source.end) { - if (this.source.start.line > line) { - return false; - } - - if (this.source.end.line < line) { - return false; - } - - if (this.source.start.line === line && this.source.start.column > column) { - return false; - } - - if (this.source.end.line === line && this.source.end.column < column) { - return false; - } - - return true; - } - - return undefined; - }; - - _proto.stringifyProperty = function stringifyProperty(name) { - return this.raws && this.raws[name] || this[name]; - }; - - _proto.valueToString = function valueToString() { - return String(this.stringifyProperty("value")); - }; - - _proto.toString = function toString() { - return [this.rawSpaceBefore, this.valueToString(), this.rawSpaceAfter].join(''); - }; - - _createClass(Node, [{ - key: "rawSpaceBefore", - get: function get() { - var rawSpace = this.raws && this.raws.spaces && this.raws.spaces.before; - - if (rawSpace === undefined) { - rawSpace = this.spaces && this.spaces.before; - } - - return rawSpace || ""; - }, - set: function set(raw) { - (0, _util.ensureObject)(this, "raws", "spaces"); - this.raws.spaces.before = raw; - } - }, { - key: "rawSpaceAfter", - get: function get() { - var rawSpace = this.raws && this.raws.spaces && this.raws.spaces.after; - - if (rawSpace === undefined) { - rawSpace = this.spaces.after; - } - - return rawSpace || ""; - }, - set: function set(raw) { - (0, _util.ensureObject)(this, "raws", "spaces"); - this.raws.spaces.after = raw; - } - }]); - - return Node; - }(); - - exports["default"] = Node; - module.exports = exports.default; -} (node$1, node$1.exports)); - -var nodeExports = node$1.exports; - -var types = {}; - -types.__esModule = true; -types.UNIVERSAL = types.ATTRIBUTE = types.CLASS = types.COMBINATOR = types.COMMENT = types.ID = types.NESTING = types.PSEUDO = types.ROOT = types.SELECTOR = types.STRING = types.TAG = void 0; -var TAG = 'tag'; -types.TAG = TAG; -var STRING = 'string'; -types.STRING = STRING; -var SELECTOR = 'selector'; -types.SELECTOR = SELECTOR; -var ROOT = 'root'; -types.ROOT = ROOT; -var PSEUDO = 'pseudo'; -types.PSEUDO = PSEUDO; -var NESTING = 'nesting'; -types.NESTING = NESTING; -var ID = 'id'; 
-types.ID = ID; -var COMMENT = 'comment'; -types.COMMENT = COMMENT; -var COMBINATOR = 'combinator'; -types.COMBINATOR = COMBINATOR; -var CLASS = 'class'; -types.CLASS = CLASS; -var ATTRIBUTE = 'attribute'; -types.ATTRIBUTE = ATTRIBUTE; -var UNIVERSAL = 'universal'; -types.UNIVERSAL = UNIVERSAL; - -(function (module, exports) { - - exports.__esModule = true; - exports["default"] = void 0; - - var _node = _interopRequireDefault(nodeExports); - - var types$1 = _interopRequireWildcard(types); - - function _getRequireWildcardCache() { if (typeof WeakMap !== "function") return null; var cache = new WeakMap(); _getRequireWildcardCache = function _getRequireWildcardCache() { return cache; }; return cache; } - - function _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } if (obj === null || typeof obj !== "object" && typeof obj !== "function") { return { "default": obj }; } var cache = _getRequireWildcardCache(); if (cache && cache.has(obj)) { return cache.get(obj); } var newObj = {}; var hasPropertyDescriptor = Object.defineProperty && Object.getOwnPropertyDescriptor; for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) { var desc = hasPropertyDescriptor ? Object.getOwnPropertyDescriptor(obj, key) : null; if (desc && (desc.get || desc.set)) { Object.defineProperty(newObj, key, desc); } else { newObj[key] = obj[key]; } } } newObj["default"] = obj; if (cache) { cache.set(obj, newObj); } return newObj; } - - function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { "default": obj }; } - - function _createForOfIteratorHelperLoose(o, allowArrayLike) { var it; if (typeof Symbol === "undefined" || o[Symbol.iterator] == null) { if (Array.isArray(o) || (it = _unsupportedIterableToArray(o)) || allowArrayLike && o && typeof o.length === "number") { if (it) o = it; var i = 0; return function () { if (i >= o.length) return { done: true }; return { done: false, value: o[i++] }; }; } throw new TypeError("Invalid attempt to iterate non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method."); } it = o[Symbol.iterator](); return it.next.bind(it); } - - function _unsupportedIterableToArray(o, minLen) { if (!o) return; if (typeof o === "string") return _arrayLikeToArray(o, minLen); var n = Object.prototype.toString.call(o).slice(8, -1); if (n === "Object" && o.constructor) n = o.constructor.name; if (n === "Map" || n === "Set") return Array.from(o); if (n === "Arguments" || /^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n)) return _arrayLikeToArray(o, minLen); } - - function _arrayLikeToArray(arr, len) { if (len == null || len > arr.length) len = arr.length; for (var i = 0, arr2 = new Array(len); i < len; i++) { arr2[i] = arr[i]; } return arr2; } - - function _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } - - function _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; } - - function _inheritsLoose(subClass, superClass) { subClass.prototype = Object.create(superClass.prototype); subClass.prototype.constructor = subClass; _setPrototypeOf(subClass, superClass); } - - function _setPrototypeOf(o, p) { 
_setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); } - - var Container = /*#__PURE__*/function (_Node) { - _inheritsLoose(Container, _Node); - - function Container(opts) { - var _this; - - _this = _Node.call(this, opts) || this; - - if (!_this.nodes) { - _this.nodes = []; - } - - return _this; - } - - var _proto = Container.prototype; - - _proto.append = function append(selector) { - selector.parent = this; - this.nodes.push(selector); - return this; - }; - - _proto.prepend = function prepend(selector) { - selector.parent = this; - this.nodes.unshift(selector); - return this; - }; - - _proto.at = function at(index) { - return this.nodes[index]; - }; - - _proto.index = function index(child) { - if (typeof child === 'number') { - return child; - } - - return this.nodes.indexOf(child); - }; - - _proto.removeChild = function removeChild(child) { - child = this.index(child); - this.at(child).parent = undefined; - this.nodes.splice(child, 1); - var index; - - for (var id in this.indexes) { - index = this.indexes[id]; - - if (index >= child) { - this.indexes[id] = index - 1; - } - } - - return this; - }; - - _proto.removeAll = function removeAll() { - for (var _iterator = _createForOfIteratorHelperLoose(this.nodes), _step; !(_step = _iterator()).done;) { - var node = _step.value; - node.parent = undefined; - } - - this.nodes = []; - return this; - }; - - _proto.empty = function empty() { - return this.removeAll(); - }; - - _proto.insertAfter = function insertAfter(oldNode, newNode) { - newNode.parent = this; - var oldIndex = this.index(oldNode); - this.nodes.splice(oldIndex + 1, 0, newNode); - newNode.parent = this; - var index; - - for (var id in this.indexes) { - index = this.indexes[id]; - - if (oldIndex <= index) { - this.indexes[id] = index + 1; - } - } - - return this; - }; - - _proto.insertBefore = function insertBefore(oldNode, newNode) { - newNode.parent = this; - var oldIndex = this.index(oldNode); - this.nodes.splice(oldIndex, 0, newNode); - newNode.parent = this; - var index; - - for (var id in this.indexes) { - index = this.indexes[id]; - - if (index <= oldIndex) { - this.indexes[id] = index + 1; - } - } - - return this; - }; - - _proto._findChildAtPosition = function _findChildAtPosition(line, col) { - var found = undefined; - this.each(function (node) { - if (node.atPosition) { - var foundChild = node.atPosition(line, col); - - if (foundChild) { - found = foundChild; - return false; - } - } else if (node.isAtPosition(line, col)) { - found = node; - return false; - } - }); - return found; - } - /** - * Return the most specific node at the line and column number given. - * The source location is based on the original parsed location, locations aren't - * updated as selector nodes are mutated. - * - * Note that this location is relative to the location of the first character - * of the selector, and not the location of the selector in the overall document - * when used in conjunction with postcss. - * - * If not found, returns undefined. - * @param {number} line The line number of the node to find. (1-based index) - * @param {number} col The column number of the node to find. 
(1-based index) - */ - ; - - _proto.atPosition = function atPosition(line, col) { - if (this.isAtPosition(line, col)) { - return this._findChildAtPosition(line, col) || this; - } else { - return undefined; - } - }; - - _proto._inferEndPosition = function _inferEndPosition() { - if (this.last && this.last.source && this.last.source.end) { - this.source = this.source || {}; - this.source.end = this.source.end || {}; - Object.assign(this.source.end, this.last.source.end); - } - }; - - _proto.each = function each(callback) { - if (!this.lastEach) { - this.lastEach = 0; - } - - if (!this.indexes) { - this.indexes = {}; - } - - this.lastEach++; - var id = this.lastEach; - this.indexes[id] = 0; - - if (!this.length) { - return undefined; - } - - var index, result; - - while (this.indexes[id] < this.length) { - index = this.indexes[id]; - result = callback(this.at(index), index); - - if (result === false) { - break; - } - - this.indexes[id] += 1; - } - - delete this.indexes[id]; - - if (result === false) { - return false; - } - }; - - _proto.walk = function walk(callback) { - return this.each(function (node, i) { - var result = callback(node, i); - - if (result !== false && node.length) { - result = node.walk(callback); - } - - if (result === false) { - return false; - } - }); - }; - - _proto.walkAttributes = function walkAttributes(callback) { - var _this2 = this; - - return this.walk(function (selector) { - if (selector.type === types$1.ATTRIBUTE) { - return callback.call(_this2, selector); - } - }); - }; - - _proto.walkClasses = function walkClasses(callback) { - var _this3 = this; - - return this.walk(function (selector) { - if (selector.type === types$1.CLASS) { - return callback.call(_this3, selector); - } - }); - }; - - _proto.walkCombinators = function walkCombinators(callback) { - var _this4 = this; - - return this.walk(function (selector) { - if (selector.type === types$1.COMBINATOR) { - return callback.call(_this4, selector); - } - }); - }; - - _proto.walkComments = function walkComments(callback) { - var _this5 = this; - - return this.walk(function (selector) { - if (selector.type === types$1.COMMENT) { - return callback.call(_this5, selector); - } - }); - }; - - _proto.walkIds = function walkIds(callback) { - var _this6 = this; - - return this.walk(function (selector) { - if (selector.type === types$1.ID) { - return callback.call(_this6, selector); - } - }); - }; - - _proto.walkNesting = function walkNesting(callback) { - var _this7 = this; - - return this.walk(function (selector) { - if (selector.type === types$1.NESTING) { - return callback.call(_this7, selector); - } - }); - }; - - _proto.walkPseudos = function walkPseudos(callback) { - var _this8 = this; - - return this.walk(function (selector) { - if (selector.type === types$1.PSEUDO) { - return callback.call(_this8, selector); - } - }); - }; - - _proto.walkTags = function walkTags(callback) { - var _this9 = this; - - return this.walk(function (selector) { - if (selector.type === types$1.TAG) { - return callback.call(_this9, selector); - } - }); - }; - - _proto.walkUniversals = function walkUniversals(callback) { - var _this10 = this; - - return this.walk(function (selector) { - if (selector.type === types$1.UNIVERSAL) { - return callback.call(_this10, selector); - } - }); - }; - - _proto.split = function split(callback) { - var _this11 = this; - - var current = []; - return this.reduce(function (memo, node, index) { - var split = callback.call(_this11, node); - current.push(node); - - if (split) { - memo.push(current); - current 
= []; - } else if (index === _this11.length - 1) { - memo.push(current); - } - - return memo; - }, []); - }; - - _proto.map = function map(callback) { - return this.nodes.map(callback); - }; - - _proto.reduce = function reduce(callback, memo) { - return this.nodes.reduce(callback, memo); - }; - - _proto.every = function every(callback) { - return this.nodes.every(callback); - }; - - _proto.some = function some(callback) { - return this.nodes.some(callback); - }; - - _proto.filter = function filter(callback) { - return this.nodes.filter(callback); - }; - - _proto.sort = function sort(callback) { - return this.nodes.sort(callback); - }; - - _proto.toString = function toString() { - return this.map(String).join(''); - }; - - _createClass(Container, [{ - key: "first", - get: function get() { - return this.at(0); - } - }, { - key: "last", - get: function get() { - return this.at(this.length - 1); - } - }, { - key: "length", - get: function get() { - return this.nodes.length; - } - }]); - - return Container; - }(_node["default"]); - - exports["default"] = Container; - module.exports = exports.default; -} (container, container.exports)); - -var containerExports = container.exports; - -(function (module, exports) { - - exports.__esModule = true; - exports["default"] = void 0; - - var _container = _interopRequireDefault(containerExports); - - var _types = types; - - function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { "default": obj }; } - - function _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } - - function _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; } - - function _inheritsLoose(subClass, superClass) { subClass.prototype = Object.create(superClass.prototype); subClass.prototype.constructor = subClass; _setPrototypeOf(subClass, superClass); } - - function _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); } - - var Root = /*#__PURE__*/function (_Container) { - _inheritsLoose(Root, _Container); - - function Root(opts) { - var _this; - - _this = _Container.call(this, opts) || this; - _this.type = _types.ROOT; - return _this; - } - - var _proto = Root.prototype; - - _proto.toString = function toString() { - var str = this.reduce(function (memo, selector) { - memo.push(String(selector)); - return memo; - }, []).join(','); - return this.trailingComma ? 
str + ',' : str; - }; - - _proto.error = function error(message, options) { - if (this._error) { - return this._error(message, options); - } else { - return new Error(message); - } - }; - - _createClass(Root, [{ - key: "errorGenerator", - set: function set(handler) { - this._error = handler; - } - }]); - - return Root; - }(_container["default"]); - - exports["default"] = Root; - module.exports = exports.default; -} (root$1, root$1.exports)); - -var rootExports = root$1.exports; - -var selector$1 = {exports: {}}; - -(function (module, exports) { - - exports.__esModule = true; - exports["default"] = void 0; - - var _container = _interopRequireDefault(containerExports); - - var _types = types; - - function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { "default": obj }; } - - function _inheritsLoose(subClass, superClass) { subClass.prototype = Object.create(superClass.prototype); subClass.prototype.constructor = subClass; _setPrototypeOf(subClass, superClass); } - - function _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); } - - var Selector = /*#__PURE__*/function (_Container) { - _inheritsLoose(Selector, _Container); - - function Selector(opts) { - var _this; - - _this = _Container.call(this, opts) || this; - _this.type = _types.SELECTOR; - return _this; - } - - return Selector; - }(_container["default"]); - - exports["default"] = Selector; - module.exports = exports.default; -} (selector$1, selector$1.exports)); - -var selectorExports = selector$1.exports; - -var className$1 = {exports: {}}; - -/*! https://mths.be/cssesc v3.0.0 by @mathias */ - -var object = {}; -var hasOwnProperty$1 = object.hasOwnProperty; -var merge = function merge(options, defaults) { - if (!options) { - return defaults; - } - var result = {}; - for (var key in defaults) { - // `if (defaults.hasOwnProperty(key) { … }` is not needed here, since - // only recognized option names are used. - result[key] = hasOwnProperty$1.call(options, key) ? options[key] : defaults[key]; - } - return result; -}; - -var regexAnySingleEscape = /[ -,\.\/:-@\[-\^`\{-~]/; -var regexSingleEscape = /[ -,\.\/:-@\[\]\^`\{-~]/; -var regexExcessiveSpaces = /(^|\\+)?(\\[A-F0-9]{1,6})\x20(?![a-fA-F0-9\x20])/g; - -// https://mathiasbynens.be/notes/css-escapes#css -var cssesc = function cssesc(string, options) { - options = merge(options, cssesc.options); - if (options.quotes != 'single' && options.quotes != 'double') { - options.quotes = 'single'; - } - var quote = options.quotes == 'double' ? '"' : '\''; - var isIdentifier = options.isIdentifier; - - var firstChar = string.charAt(0); - var output = ''; - var counter = 0; - var length = string.length; - while (counter < length) { - var character = string.charAt(counter++); - var codePoint = character.charCodeAt(); - var value = void 0; - // If it’s not a printable ASCII character… - if (codePoint < 0x20 || codePoint > 0x7E) { - if (codePoint >= 0xD800 && codePoint <= 0xDBFF && counter < length) { - // It’s a high surrogate, and there is a next character. - var extra = string.charCodeAt(counter++); - if ((extra & 0xFC00) == 0xDC00) { - // next character is low surrogate - codePoint = ((codePoint & 0x3FF) << 10) + (extra & 0x3FF) + 0x10000; - } else { - // It’s an unmatched surrogate; only append this code unit, in case - // the next code unit is the high surrogate of a surrogate pair. 
- counter--; - } - } - value = '\\' + codePoint.toString(16).toUpperCase() + ' '; - } else { - if (options.escapeEverything) { - if (regexAnySingleEscape.test(character)) { - value = '\\' + character; - } else { - value = '\\' + codePoint.toString(16).toUpperCase() + ' '; - } - } else if (/[\t\n\f\r\x0B]/.test(character)) { - value = '\\' + codePoint.toString(16).toUpperCase() + ' '; - } else if (character == '\\' || !isIdentifier && (character == '"' && quote == character || character == '\'' && quote == character) || isIdentifier && regexSingleEscape.test(character)) { - value = '\\' + character; - } else { - value = character; - } - } - output += value; - } - - if (isIdentifier) { - if (/^-[-\d]/.test(output)) { - output = '\\-' + output.slice(1); - } else if (/\d/.test(firstChar)) { - output = '\\3' + firstChar + ' ' + output.slice(1); - } - } - - // Remove spaces after `\HEX` escapes that are not followed by a hex digit, - // since they’re redundant. Note that this is only possible if the escape - // sequence isn’t preceded by an odd number of backslashes. - output = output.replace(regexExcessiveSpaces, function ($0, $1, $2) { - if ($1 && $1.length % 2) { - // It’s not safe to remove the space, so don’t. - return $0; - } - // Strip the space. - return ($1 || '') + $2; - }); - - if (!isIdentifier && options.wrap) { - return quote + output + quote; - } - return output; -}; - -// Expose default options (so they can be overridden globally). -cssesc.options = { - 'escapeEverything': false, - 'isIdentifier': false, - 'quotes': 'single', - 'wrap': false -}; - -cssesc.version = '3.0.0'; - -var cssesc_1 = cssesc; - -(function (module, exports) { - - exports.__esModule = true; - exports["default"] = void 0; - - var _cssesc = _interopRequireDefault(cssesc_1); - - var _util = util; - - var _node = _interopRequireDefault(nodeExports); - - var _types = types; - - function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { "default": obj }; } - - function _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } - - function _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; } - - function _inheritsLoose(subClass, superClass) { subClass.prototype = Object.create(superClass.prototype); subClass.prototype.constructor = subClass; _setPrototypeOf(subClass, superClass); } - - function _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); } - - var ClassName = /*#__PURE__*/function (_Node) { - _inheritsLoose(ClassName, _Node); - - function ClassName(opts) { - var _this; - - _this = _Node.call(this, opts) || this; - _this.type = _types.CLASS; - _this._constructed = true; - return _this; - } - - var _proto = ClassName.prototype; - - _proto.valueToString = function valueToString() { - return '.' 
+ _Node.prototype.valueToString.call(this); - }; - - _createClass(ClassName, [{ - key: "value", - get: function get() { - return this._value; - }, - set: function set(v) { - if (this._constructed) { - var escaped = (0, _cssesc["default"])(v, { - isIdentifier: true - }); - - if (escaped !== v) { - (0, _util.ensureObject)(this, "raws"); - this.raws.value = escaped; - } else if (this.raws) { - delete this.raws.value; - } - } - - this._value = v; - } - }]); - - return ClassName; - }(_node["default"]); - - exports["default"] = ClassName; - module.exports = exports.default; -} (className$1, className$1.exports)); - -var classNameExports = className$1.exports; - -var comment$2 = {exports: {}}; - -(function (module, exports) { - - exports.__esModule = true; - exports["default"] = void 0; - - var _node = _interopRequireDefault(nodeExports); - - var _types = types; - - function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { "default": obj }; } - - function _inheritsLoose(subClass, superClass) { subClass.prototype = Object.create(superClass.prototype); subClass.prototype.constructor = subClass; _setPrototypeOf(subClass, superClass); } - - function _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); } - - var Comment = /*#__PURE__*/function (_Node) { - _inheritsLoose(Comment, _Node); - - function Comment(opts) { - var _this; - - _this = _Node.call(this, opts) || this; - _this.type = _types.COMMENT; - return _this; - } - - return Comment; - }(_node["default"]); - - exports["default"] = Comment; - module.exports = exports.default; -} (comment$2, comment$2.exports)); - -var commentExports = comment$2.exports; - -var id$1 = {exports: {}}; - -(function (module, exports) { - - exports.__esModule = true; - exports["default"] = void 0; - - var _node = _interopRequireDefault(nodeExports); - - var _types = types; - - function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { "default": obj }; } - - function _inheritsLoose(subClass, superClass) { subClass.prototype = Object.create(superClass.prototype); subClass.prototype.constructor = subClass; _setPrototypeOf(subClass, superClass); } - - function _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); } - - var ID = /*#__PURE__*/function (_Node) { - _inheritsLoose(ID, _Node); - - function ID(opts) { - var _this; - - _this = _Node.call(this, opts) || this; - _this.type = _types.ID; - return _this; - } - - var _proto = ID.prototype; - - _proto.valueToString = function valueToString() { - return '#' + _Node.prototype.valueToString.call(this); - }; - - return ID; - }(_node["default"]); - - exports["default"] = ID; - module.exports = exports.default; -} (id$1, id$1.exports)); - -var idExports = id$1.exports; - -var tag$1 = {exports: {}}; - -var namespace = {exports: {}}; - -(function (module, exports) { - - exports.__esModule = true; - exports["default"] = void 0; - - var _cssesc = _interopRequireDefault(cssesc_1); - - var _util = util; - - var _node = _interopRequireDefault(nodeExports); - - function _interopRequireDefault(obj) { return obj && obj.__esModule ? 
obj : { "default": obj }; } - - function _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } - - function _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; } - - function _inheritsLoose(subClass, superClass) { subClass.prototype = Object.create(superClass.prototype); subClass.prototype.constructor = subClass; _setPrototypeOf(subClass, superClass); } - - function _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); } - - var Namespace = /*#__PURE__*/function (_Node) { - _inheritsLoose(Namespace, _Node); - - function Namespace() { - return _Node.apply(this, arguments) || this; - } - - var _proto = Namespace.prototype; - - _proto.qualifiedName = function qualifiedName(value) { - if (this.namespace) { - return this.namespaceString + "|" + value; - } else { - return value; - } - }; - - _proto.valueToString = function valueToString() { - return this.qualifiedName(_Node.prototype.valueToString.call(this)); - }; - - _createClass(Namespace, [{ - key: "namespace", - get: function get() { - return this._namespace; - }, - set: function set(namespace) { - if (namespace === true || namespace === "*" || namespace === "&") { - this._namespace = namespace; - - if (this.raws) { - delete this.raws.namespace; - } - - return; - } - - var escaped = (0, _cssesc["default"])(namespace, { - isIdentifier: true - }); - this._namespace = namespace; - - if (escaped !== namespace) { - (0, _util.ensureObject)(this, "raws"); - this.raws.namespace = escaped; - } else if (this.raws) { - delete this.raws.namespace; - } - } - }, { - key: "ns", - get: function get() { - return this._namespace; - }, - set: function set(namespace) { - this.namespace = namespace; - } - }, { - key: "namespaceString", - get: function get() { - if (this.namespace) { - var ns = this.stringifyProperty("namespace"); - - if (ns === true) { - return ''; - } else { - return ns; - } - } else { - return ''; - } - } - }]); - - return Namespace; - }(_node["default"]); - - exports["default"] = Namespace; - module.exports = exports.default; -} (namespace, namespace.exports)); - -var namespaceExports = namespace.exports; - -(function (module, exports) { - - exports.__esModule = true; - exports["default"] = void 0; - - var _namespace = _interopRequireDefault(namespaceExports); - - var _types = types; - - function _interopRequireDefault(obj) { return obj && obj.__esModule ? 
obj : { "default": obj }; } - - function _inheritsLoose(subClass, superClass) { subClass.prototype = Object.create(superClass.prototype); subClass.prototype.constructor = subClass; _setPrototypeOf(subClass, superClass); } - - function _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); } - - var Tag = /*#__PURE__*/function (_Namespace) { - _inheritsLoose(Tag, _Namespace); - - function Tag(opts) { - var _this; - - _this = _Namespace.call(this, opts) || this; - _this.type = _types.TAG; - return _this; - } - - return Tag; - }(_namespace["default"]); - - exports["default"] = Tag; - module.exports = exports.default; -} (tag$1, tag$1.exports)); - -var tagExports = tag$1.exports; - -var string$1 = {exports: {}}; - -(function (module, exports) { - - exports.__esModule = true; - exports["default"] = void 0; - - var _node = _interopRequireDefault(nodeExports); - - var _types = types; - - function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { "default": obj }; } - - function _inheritsLoose(subClass, superClass) { subClass.prototype = Object.create(superClass.prototype); subClass.prototype.constructor = subClass; _setPrototypeOf(subClass, superClass); } - - function _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); } - - var String = /*#__PURE__*/function (_Node) { - _inheritsLoose(String, _Node); - - function String(opts) { - var _this; - - _this = _Node.call(this, opts) || this; - _this.type = _types.STRING; - return _this; - } - - return String; - }(_node["default"]); - - exports["default"] = String; - module.exports = exports.default; -} (string$1, string$1.exports)); - -var stringExports = string$1.exports; - -var pseudo$1 = {exports: {}}; - -(function (module, exports) { - - exports.__esModule = true; - exports["default"] = void 0; - - var _container = _interopRequireDefault(containerExports); - - var _types = types; - - function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { "default": obj }; } - - function _inheritsLoose(subClass, superClass) { subClass.prototype = Object.create(superClass.prototype); subClass.prototype.constructor = subClass; _setPrototypeOf(subClass, superClass); } - - function _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); } - - var Pseudo = /*#__PURE__*/function (_Container) { - _inheritsLoose(Pseudo, _Container); - - function Pseudo(opts) { - var _this; - - _this = _Container.call(this, opts) || this; - _this.type = _types.PSEUDO; - return _this; - } - - var _proto = Pseudo.prototype; - - _proto.toString = function toString() { - var params = this.length ? '(' + this.map(String).join(',') + ')' : ''; - return [this.rawSpaceBefore, this.stringifyProperty("value"), params, this.rawSpaceAfter].join(''); - }; - - return Pseudo; - }(_container["default"]); - - exports["default"] = Pseudo; - module.exports = exports.default; -} (pseudo$1, pseudo$1.exports)); - -var pseudoExports = pseudo$1.exports; - -var attribute$1 = {}; - -/** - * For Node.js, simply re-export the core `util.deprecate` function. 
- */ - -var node = require$$0$6.deprecate; - -(function (exports) { - - exports.__esModule = true; - exports.unescapeValue = unescapeValue; - exports["default"] = void 0; - - var _cssesc = _interopRequireDefault(cssesc_1); - - var _unesc = _interopRequireDefault(unescExports); - - var _namespace = _interopRequireDefault(namespaceExports); - - var _types = types; - - var _CSSESC_QUOTE_OPTIONS; - - function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { "default": obj }; } - - function _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } - - function _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; } - - function _inheritsLoose(subClass, superClass) { subClass.prototype = Object.create(superClass.prototype); subClass.prototype.constructor = subClass; _setPrototypeOf(subClass, superClass); } - - function _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); } - - var deprecate = node; - - var WRAPPED_IN_QUOTES = /^('|")([^]*)\1$/; - var warnOfDeprecatedValueAssignment = deprecate(function () {}, "Assigning an attribute a value containing characters that might need to be escaped is deprecated. " + "Call attribute.setValue() instead."); - var warnOfDeprecatedQuotedAssignment = deprecate(function () {}, "Assigning attr.quoted is deprecated and has no effect. Assign to attr.quoteMark instead."); - var warnOfDeprecatedConstructor = deprecate(function () {}, "Constructing an Attribute selector with a value without specifying quoteMark is deprecated. Note: The value should be unescaped now."); - - function unescapeValue(value) { - var deprecatedUsage = false; - var quoteMark = null; - var unescaped = value; - var m = unescaped.match(WRAPPED_IN_QUOTES); - - if (m) { - quoteMark = m[1]; - unescaped = m[2]; - } - - unescaped = (0, _unesc["default"])(unescaped); - - if (unescaped !== value) { - deprecatedUsage = true; - } - - return { - deprecatedUsage: deprecatedUsage, - unescaped: unescaped, - quoteMark: quoteMark - }; - } - - function handleDeprecatedContructorOpts(opts) { - if (opts.quoteMark !== undefined) { - return opts; - } - - if (opts.value === undefined) { - return opts; - } - - warnOfDeprecatedConstructor(); - - var _unescapeValue = unescapeValue(opts.value), - quoteMark = _unescapeValue.quoteMark, - unescaped = _unescapeValue.unescaped; - - if (!opts.raws) { - opts.raws = {}; - } - - if (opts.raws.value === undefined) { - opts.raws.value = opts.value; - } - - opts.value = unescaped; - opts.quoteMark = quoteMark; - return opts; - } - - var Attribute = /*#__PURE__*/function (_Namespace) { - _inheritsLoose(Attribute, _Namespace); - - function Attribute(opts) { - var _this; - - if (opts === void 0) { - opts = {}; - } - - _this = _Namespace.call(this, handleDeprecatedContructorOpts(opts)) || this; - _this.type = _types.ATTRIBUTE; - _this.raws = _this.raws || {}; - Object.defineProperty(_this.raws, 'unquoted', { - get: deprecate(function () { - return _this.value; - }, "attr.raws.unquoted is deprecated. 
Call attr.value instead."), - set: deprecate(function () { - return _this.value; - }, "Setting attr.raws.unquoted is deprecated and has no effect. attr.value is unescaped by default now.") - }); - _this._constructed = true; - return _this; - } - /** - * Returns the Attribute's value quoted such that it would be legal to use - * in the value of a css file. The original value's quotation setting - * used for stringification is left unchanged. See `setValue(value, options)` - * if you want to control the quote settings of a new value for the attribute. - * - * You can also change the quotation used for the current value by setting quoteMark. - * - * Options: - * * quoteMark {'"' | "'" | null} - Use this value to quote the value. If this - * option is not set, the original value for quoteMark will be used. If - * indeterminate, a double quote is used. The legal values are: - * * `null` - the value will be unquoted and characters will be escaped as necessary. - * * `'` - the value will be quoted with a single quote and single quotes are escaped. - * * `"` - the value will be quoted with a double quote and double quotes are escaped. - * * preferCurrentQuoteMark {boolean} - if true, prefer the source quote mark - * over the quoteMark option value. - * * smart {boolean} - if true, will select a quote mark based on the value - * and the other options specified here. See the `smartQuoteMark()` - * method. - **/ - - - var _proto = Attribute.prototype; - - _proto.getQuotedValue = function getQuotedValue(options) { - if (options === void 0) { - options = {}; - } - - var quoteMark = this._determineQuoteMark(options); - - var cssescopts = CSSESC_QUOTE_OPTIONS[quoteMark]; - var escaped = (0, _cssesc["default"])(this._value, cssescopts); - return escaped; - }; - - _proto._determineQuoteMark = function _determineQuoteMark(options) { - return options.smart ? this.smartQuoteMark(options) : this.preferredQuoteMark(options); - } - /** - * Set the unescaped value with the specified quotation options. The value - * provided must not include any wrapping quote marks -- those quotes will - * be interpreted as part of the value and escaped accordingly. - */ - ; - - _proto.setValue = function setValue(value, options) { - if (options === void 0) { - options = {}; - } - - this._value = value; - this._quoteMark = this._determineQuoteMark(options); - - this._syncRawValue(); - } - /** - * Intelligently select a quoteMark value based on the value's contents. If - * the value is a legal CSS ident, it will not be quoted. Otherwise a quote - * mark will be picked that minimizes the number of escapes. - * - * If there's no clear winner, the quote mark from these options is used, - * then the source quote mark (this is inverted if `preferCurrentQuoteMark` is - * true). If the quoteMark is unspecified, a double quote is used. - * - * @param options This takes the quoteMark and preferCurrentQuoteMark options - * from the quoteValue method. 
- */ - ; - - _proto.smartQuoteMark = function smartQuoteMark(options) { - var v = this.value; - var numSingleQuotes = v.replace(/[^']/g, '').length; - var numDoubleQuotes = v.replace(/[^"]/g, '').length; - - if (numSingleQuotes + numDoubleQuotes === 0) { - var escaped = (0, _cssesc["default"])(v, { - isIdentifier: true - }); - - if (escaped === v) { - return Attribute.NO_QUOTE; - } else { - var pref = this.preferredQuoteMark(options); - - if (pref === Attribute.NO_QUOTE) { - // pick a quote mark that isn't none and see if it's smaller - var quote = this.quoteMark || options.quoteMark || Attribute.DOUBLE_QUOTE; - var opts = CSSESC_QUOTE_OPTIONS[quote]; - var quoteValue = (0, _cssesc["default"])(v, opts); - - if (quoteValue.length < escaped.length) { - return quote; - } - } - - return pref; - } - } else if (numDoubleQuotes === numSingleQuotes) { - return this.preferredQuoteMark(options); - } else if (numDoubleQuotes < numSingleQuotes) { - return Attribute.DOUBLE_QUOTE; - } else { - return Attribute.SINGLE_QUOTE; - } - } - /** - * Selects the preferred quote mark based on the options and the current quote mark value. - * If you want the quote mark to depend on the attribute value, call `smartQuoteMark(opts)` - * instead. - */ - ; - - _proto.preferredQuoteMark = function preferredQuoteMark(options) { - var quoteMark = options.preferCurrentQuoteMark ? this.quoteMark : options.quoteMark; - - if (quoteMark === undefined) { - quoteMark = options.preferCurrentQuoteMark ? options.quoteMark : this.quoteMark; - } - - if (quoteMark === undefined) { - quoteMark = Attribute.DOUBLE_QUOTE; - } - - return quoteMark; - }; - - _proto._syncRawValue = function _syncRawValue() { - var rawValue = (0, _cssesc["default"])(this._value, CSSESC_QUOTE_OPTIONS[this.quoteMark]); - - if (rawValue === this._value) { - if (this.raws) { - delete this.raws.value; - } - } else { - this.raws.value = rawValue; - } - }; - - _proto._handleEscapes = function _handleEscapes(prop, value) { - if (this._constructed) { - var escaped = (0, _cssesc["default"])(value, { - isIdentifier: true - }); - - if (escaped !== value) { - this.raws[prop] = escaped; - } else { - delete this.raws[prop]; - } - } - }; - - _proto._spacesFor = function _spacesFor(name) { - var attrSpaces = { - before: '', - after: '' - }; - var spaces = this.spaces[name] || {}; - var rawSpaces = this.raws.spaces && this.raws.spaces[name] || {}; - return Object.assign(attrSpaces, spaces, rawSpaces); - }; - - _proto._stringFor = function _stringFor(name, spaceName, concat) { - if (spaceName === void 0) { - spaceName = name; - } - - if (concat === void 0) { - concat = defaultAttrConcat; - } - - var attrSpaces = this._spacesFor(spaceName); - - return concat(this.stringifyProperty(name), attrSpaces); - } - /** - * returns the offset of the attribute part specified relative to the - * start of the node of the output string. - * - * * "ns" - alias for "namespace" - * * "namespace" - the namespace if it exists. - * * "attribute" - the attribute name - * * "attributeNS" - the start of the attribute or its namespace - * * "operator" - the match operator of the attribute - * * "value" - The value (string or identifier) - * * "insensitive" - the case insensitivity flag; - * @param part One of the possible values inside an attribute. - * @returns -1 if the name is invalid or the value doesn't exist in this attribute. 
- */ - ; - - _proto.offsetOf = function offsetOf(name) { - var count = 1; - - var attributeSpaces = this._spacesFor("attribute"); - - count += attributeSpaces.before.length; - - if (name === "namespace" || name === "ns") { - return this.namespace ? count : -1; - } - - if (name === "attributeNS") { - return count; - } - - count += this.namespaceString.length; - - if (this.namespace) { - count += 1; - } - - if (name === "attribute") { - return count; - } - - count += this.stringifyProperty("attribute").length; - count += attributeSpaces.after.length; - - var operatorSpaces = this._spacesFor("operator"); - - count += operatorSpaces.before.length; - var operator = this.stringifyProperty("operator"); - - if (name === "operator") { - return operator ? count : -1; - } - - count += operator.length; - count += operatorSpaces.after.length; - - var valueSpaces = this._spacesFor("value"); - - count += valueSpaces.before.length; - var value = this.stringifyProperty("value"); - - if (name === "value") { - return value ? count : -1; - } - - count += value.length; - count += valueSpaces.after.length; - - var insensitiveSpaces = this._spacesFor("insensitive"); - - count += insensitiveSpaces.before.length; - - if (name === "insensitive") { - return this.insensitive ? count : -1; - } - - return -1; - }; - - _proto.toString = function toString() { - var _this2 = this; - - var selector = [this.rawSpaceBefore, '[']; - selector.push(this._stringFor('qualifiedAttribute', 'attribute')); - - if (this.operator && (this.value || this.value === '')) { - selector.push(this._stringFor('operator')); - selector.push(this._stringFor('value')); - selector.push(this._stringFor('insensitiveFlag', 'insensitive', function (attrValue, attrSpaces) { - if (attrValue.length > 0 && !_this2.quoted && attrSpaces.before.length === 0 && !(_this2.spaces.value && _this2.spaces.value.after)) { - attrSpaces.before = " "; - } - - return defaultAttrConcat(attrValue, attrSpaces); - })); - } - - selector.push(']'); - selector.push(this.rawSpaceAfter); - return selector.join(''); - }; - - _createClass(Attribute, [{ - key: "quoted", - get: function get() { - var qm = this.quoteMark; - return qm === "'" || qm === '"'; - }, - set: function set(value) { - warnOfDeprecatedQuotedAssignment(); - } - /** - * returns a single (`'`) or double (`"`) quote character if the value is quoted. - * returns `null` if the value is not quoted. - * returns `undefined` if the quotation state is unknown (this can happen when - * the attribute is constructed without specifying a quote mark.) - */ - - }, { - key: "quoteMark", - get: function get() { - return this._quoteMark; - } - /** - * Set the quote mark to be used by this attribute's value. - * If the quote mark changes, the raw (escaped) value at `attr.raws.value` of the attribute - * value is updated accordingly. - * - * @param {"'" | '"' | null} quoteMark The quote mark or `null` if the value should be unquoted. - */ - , - set: function set(quoteMark) { - if (!this._constructed) { - this._quoteMark = quoteMark; - return; - } - - if (this._quoteMark !== quoteMark) { - this._quoteMark = quoteMark; - - this._syncRawValue(); - } - } - }, { - key: "qualifiedAttribute", - get: function get() { - return this.qualifiedName(this.raws.attribute || this.attribute); - } - }, { - key: "insensitiveFlag", - get: function get() { - return this.insensitive ? 
'i' : ''; - } - }, { - key: "value", - get: function get() { - return this._value; - }, - set: - /** - * Before 3.0, the value had to be set to an escaped value including any wrapped - * quote marks. In 3.0, the semantics of `Attribute.value` changed so that the value - * is unescaped during parsing and any quote marks are removed. - * - * Because the ambiguity of this semantic change, if you set `attr.value = newValue`, - * a deprecation warning is raised when the new value contains any characters that would - * require escaping (including if it contains wrapped quotes). - * - * Instead, you should call `attr.setValue(newValue, opts)` and pass options that describe - * how the new value is quoted. - */ - function set(v) { - if (this._constructed) { - var _unescapeValue2 = unescapeValue(v), - deprecatedUsage = _unescapeValue2.deprecatedUsage, - unescaped = _unescapeValue2.unescaped, - quoteMark = _unescapeValue2.quoteMark; - - if (deprecatedUsage) { - warnOfDeprecatedValueAssignment(); - } - - if (unescaped === this._value && quoteMark === this._quoteMark) { - return; - } - - this._value = unescaped; - this._quoteMark = quoteMark; - - this._syncRawValue(); - } else { - this._value = v; - } - } - }, { - key: "insensitive", - get: function get() { - return this._insensitive; - } - /** - * Set the case insensitive flag. - * If the case insensitive flag changes, the raw (escaped) value at `attr.raws.insensitiveFlag` - * of the attribute is updated accordingly. - * - * @param {true | false} insensitive true if the attribute should match case-insensitively. - */ - , - set: function set(insensitive) { - if (!insensitive) { - this._insensitive = false; // "i" and "I" can be used in "this.raws.insensitiveFlag" to store the original notation. - // When setting `attr.insensitive = false` both should be erased to ensure correct serialization. - - if (this.raws && (this.raws.insensitiveFlag === 'I' || this.raws.insensitiveFlag === 'i')) { - this.raws.insensitiveFlag = undefined; - } - } - - this._insensitive = insensitive; - } - }, { - key: "attribute", - get: function get() { - return this._attribute; - }, - set: function set(name) { - this._handleEscapes("attribute", name); - - this._attribute = name; - } - }]); - - return Attribute; - }(_namespace["default"]); - - exports["default"] = Attribute; - Attribute.NO_QUOTE = null; - Attribute.SINGLE_QUOTE = "'"; - Attribute.DOUBLE_QUOTE = '"'; - var CSSESC_QUOTE_OPTIONS = (_CSSESC_QUOTE_OPTIONS = { - "'": { - quotes: 'single', - wrap: true - }, - '"': { - quotes: 'double', - wrap: true - } - }, _CSSESC_QUOTE_OPTIONS[null] = { - isIdentifier: true - }, _CSSESC_QUOTE_OPTIONS); - - function defaultAttrConcat(attrValue, attrSpaces) { - return "" + attrSpaces.before + attrValue + attrSpaces.after; - } -} (attribute$1)); - -var universal$1 = {exports: {}}; - -(function (module, exports) { - - exports.__esModule = true; - exports["default"] = void 0; - - var _namespace = _interopRequireDefault(namespaceExports); - - var _types = types; - - function _interopRequireDefault(obj) { return obj && obj.__esModule ? 
obj : { "default": obj }; } - - function _inheritsLoose(subClass, superClass) { subClass.prototype = Object.create(superClass.prototype); subClass.prototype.constructor = subClass; _setPrototypeOf(subClass, superClass); } - - function _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); } - - var Universal = /*#__PURE__*/function (_Namespace) { - _inheritsLoose(Universal, _Namespace); - - function Universal(opts) { - var _this; - - _this = _Namespace.call(this, opts) || this; - _this.type = _types.UNIVERSAL; - _this.value = '*'; - return _this; - } - - return Universal; - }(_namespace["default"]); - - exports["default"] = Universal; - module.exports = exports.default; -} (universal$1, universal$1.exports)); - -var universalExports = universal$1.exports; - -var combinator$2 = {exports: {}}; - -(function (module, exports) { - - exports.__esModule = true; - exports["default"] = void 0; - - var _node = _interopRequireDefault(nodeExports); - - var _types = types; - - function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { "default": obj }; } - - function _inheritsLoose(subClass, superClass) { subClass.prototype = Object.create(superClass.prototype); subClass.prototype.constructor = subClass; _setPrototypeOf(subClass, superClass); } - - function _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); } - - var Combinator = /*#__PURE__*/function (_Node) { - _inheritsLoose(Combinator, _Node); - - function Combinator(opts) { - var _this; - - _this = _Node.call(this, opts) || this; - _this.type = _types.COMBINATOR; - return _this; - } - - return Combinator; - }(_node["default"]); - - exports["default"] = Combinator; - module.exports = exports.default; -} (combinator$2, combinator$2.exports)); - -var combinatorExports = combinator$2.exports; - -var nesting$1 = {exports: {}}; - -(function (module, exports) { - - exports.__esModule = true; - exports["default"] = void 0; - - var _node = _interopRequireDefault(nodeExports); - - var _types = types; - - function _interopRequireDefault(obj) { return obj && obj.__esModule ? 
obj : { "default": obj }; } - - function _inheritsLoose(subClass, superClass) { subClass.prototype = Object.create(superClass.prototype); subClass.prototype.constructor = subClass; _setPrototypeOf(subClass, superClass); } - - function _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); } - - var Nesting = /*#__PURE__*/function (_Node) { - _inheritsLoose(Nesting, _Node); - - function Nesting(opts) { - var _this; - - _this = _Node.call(this, opts) || this; - _this.type = _types.NESTING; - _this.value = '&'; - return _this; - } - - return Nesting; - }(_node["default"]); - - exports["default"] = Nesting; - module.exports = exports.default; -} (nesting$1, nesting$1.exports)); - -var nestingExports = nesting$1.exports; - -var sortAscending = {exports: {}}; - -(function (module, exports) { - - exports.__esModule = true; - exports["default"] = sortAscending; - - function sortAscending(list) { - return list.sort(function (a, b) { - return a - b; - }); - } - module.exports = exports.default; -} (sortAscending, sortAscending.exports)); - -var sortAscendingExports = sortAscending.exports; - -var tokenize = {}; - -var tokenTypes = {}; - -tokenTypes.__esModule = true; -tokenTypes.combinator = tokenTypes.word = tokenTypes.comment = tokenTypes.str = tokenTypes.tab = tokenTypes.newline = tokenTypes.feed = tokenTypes.cr = tokenTypes.backslash = tokenTypes.bang = tokenTypes.slash = tokenTypes.doubleQuote = tokenTypes.singleQuote = tokenTypes.space = tokenTypes.greaterThan = tokenTypes.pipe = tokenTypes.equals = tokenTypes.plus = tokenTypes.caret = tokenTypes.tilde = tokenTypes.dollar = tokenTypes.closeSquare = tokenTypes.openSquare = tokenTypes.closeParenthesis = tokenTypes.openParenthesis = tokenTypes.semicolon = tokenTypes.colon = tokenTypes.comma = tokenTypes.at = tokenTypes.asterisk = tokenTypes.ampersand = void 0; -var ampersand = 38; // `&`.charCodeAt(0); - -tokenTypes.ampersand = ampersand; -var asterisk = 42; // `*`.charCodeAt(0); - -tokenTypes.asterisk = asterisk; -var at = 64; // `@`.charCodeAt(0); - -tokenTypes.at = at; -var comma = 44; // `,`.charCodeAt(0); - -tokenTypes.comma = comma; -var colon = 58; // `:`.charCodeAt(0); - -tokenTypes.colon = colon; -var semicolon = 59; // `;`.charCodeAt(0); - -tokenTypes.semicolon = semicolon; -var openParenthesis = 40; // `(`.charCodeAt(0); - -tokenTypes.openParenthesis = openParenthesis; -var closeParenthesis = 41; // `)`.charCodeAt(0); - -tokenTypes.closeParenthesis = closeParenthesis; -var openSquare = 91; // `[`.charCodeAt(0); - -tokenTypes.openSquare = openSquare; -var closeSquare = 93; // `]`.charCodeAt(0); - -tokenTypes.closeSquare = closeSquare; -var dollar = 36; // `$`.charCodeAt(0); - -tokenTypes.dollar = dollar; -var tilde = 126; // `~`.charCodeAt(0); - -tokenTypes.tilde = tilde; -var caret = 94; // `^`.charCodeAt(0); - -tokenTypes.caret = caret; -var plus = 43; // `+`.charCodeAt(0); - -tokenTypes.plus = plus; -var equals = 61; // `=`.charCodeAt(0); - -tokenTypes.equals = equals; -var pipe = 124; // `|`.charCodeAt(0); - -tokenTypes.pipe = pipe; -var greaterThan = 62; // `>`.charCodeAt(0); - -tokenTypes.greaterThan = greaterThan; -var space = 32; // ` `.charCodeAt(0); - -tokenTypes.space = space; -var singleQuote = 39; // `'`.charCodeAt(0); - -tokenTypes.singleQuote = singleQuote; -var doubleQuote = 34; // `"`.charCodeAt(0); - -tokenTypes.doubleQuote = doubleQuote; -var slash = 47; // `/`.charCodeAt(0); - -tokenTypes.slash = slash; -var bang 
= 33; // `!`.charCodeAt(0); - -tokenTypes.bang = bang; -var backslash = 92; // '\\'.charCodeAt(0); - -tokenTypes.backslash = backslash; -var cr = 13; // '\r'.charCodeAt(0); - -tokenTypes.cr = cr; -var feed = 12; // '\f'.charCodeAt(0); - -tokenTypes.feed = feed; -var newline = 10; // '\n'.charCodeAt(0); - -tokenTypes.newline = newline; -var tab = 9; // '\t'.charCodeAt(0); -// Expose aliases primarily for readability. - -tokenTypes.tab = tab; -var str = singleQuote; // No good single character representation! - -tokenTypes.str = str; -var comment$1 = -1; -tokenTypes.comment = comment$1; -var word = -2; -tokenTypes.word = word; -var combinator$1 = -3; -tokenTypes.combinator = combinator$1; - -(function (exports) { - - exports.__esModule = true; - exports["default"] = tokenize; - exports.FIELDS = void 0; - - var t = _interopRequireWildcard(tokenTypes); - - var _unescapable, _wordDelimiters; - - function _getRequireWildcardCache() { if (typeof WeakMap !== "function") return null; var cache = new WeakMap(); _getRequireWildcardCache = function _getRequireWildcardCache() { return cache; }; return cache; } - - function _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } if (obj === null || typeof obj !== "object" && typeof obj !== "function") { return { "default": obj }; } var cache = _getRequireWildcardCache(); if (cache && cache.has(obj)) { return cache.get(obj); } var newObj = {}; var hasPropertyDescriptor = Object.defineProperty && Object.getOwnPropertyDescriptor; for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) { var desc = hasPropertyDescriptor ? Object.getOwnPropertyDescriptor(obj, key) : null; if (desc && (desc.get || desc.set)) { Object.defineProperty(newObj, key, desc); } else { newObj[key] = obj[key]; } } } newObj["default"] = obj; if (cache) { cache.set(obj, newObj); } return newObj; } - - var unescapable = (_unescapable = {}, _unescapable[t.tab] = true, _unescapable[t.newline] = true, _unescapable[t.cr] = true, _unescapable[t.feed] = true, _unescapable); - var wordDelimiters = (_wordDelimiters = {}, _wordDelimiters[t.space] = true, _wordDelimiters[t.tab] = true, _wordDelimiters[t.newline] = true, _wordDelimiters[t.cr] = true, _wordDelimiters[t.feed] = true, _wordDelimiters[t.ampersand] = true, _wordDelimiters[t.asterisk] = true, _wordDelimiters[t.bang] = true, _wordDelimiters[t.comma] = true, _wordDelimiters[t.colon] = true, _wordDelimiters[t.semicolon] = true, _wordDelimiters[t.openParenthesis] = true, _wordDelimiters[t.closeParenthesis] = true, _wordDelimiters[t.openSquare] = true, _wordDelimiters[t.closeSquare] = true, _wordDelimiters[t.singleQuote] = true, _wordDelimiters[t.doubleQuote] = true, _wordDelimiters[t.plus] = true, _wordDelimiters[t.pipe] = true, _wordDelimiters[t.tilde] = true, _wordDelimiters[t.greaterThan] = true, _wordDelimiters[t.equals] = true, _wordDelimiters[t.dollar] = true, _wordDelimiters[t.caret] = true, _wordDelimiters[t.slash] = true, _wordDelimiters); - var hex = {}; - var hexChars = "0123456789abcdefABCDEF"; - - for (var i = 0; i < hexChars.length; i++) { - hex[hexChars.charCodeAt(i)] = true; - } - /** - * Returns the last index of the bar css word - * @param {string} css The string in which the word begins - * @param {number} start The index into the string where word's first letter occurs - */ - - - function consumeWord(css, start) { - var next = start; - var code; - - do { - code = css.charCodeAt(next); - - if (wordDelimiters[code]) { - return next - 1; - } else if (code === t.backslash) { - next = 
consumeEscape(css, next) + 1; - } else { - // All other characters are part of the word - next++; - } - } while (next < css.length); - - return next - 1; - } - /** - * Returns the last index of the escape sequence - * @param {string} css The string in which the sequence begins - * @param {number} start The index into the string where escape character (`\`) occurs. - */ - - - function consumeEscape(css, start) { - var next = start; - var code = css.charCodeAt(next + 1); - - if (unescapable[code]) ; else if (hex[code]) { - var hexDigits = 0; // consume up to 6 hex chars - - do { - next++; - hexDigits++; - code = css.charCodeAt(next + 1); - } while (hex[code] && hexDigits < 6); // if fewer than 6 hex chars, a trailing space ends the escape - - - if (hexDigits < 6 && code === t.space) { - next++; - } - } else { - // the next char is part of the current word - next++; - } - - return next; - } - - var FIELDS = { - TYPE: 0, - START_LINE: 1, - START_COL: 2, - END_LINE: 3, - END_COL: 4, - START_POS: 5, - END_POS: 6 - }; - exports.FIELDS = FIELDS; - - function tokenize(input) { - var tokens = []; - var css = input.css.valueOf(); - var _css = css, - length = _css.length; - var offset = -1; - var line = 1; - var start = 0; - var end = 0; - var code, content, endColumn, endLine, escaped, escapePos, last, lines, next, nextLine, nextOffset, quote, tokenType; - - function unclosed(what, fix) { - if (input.safe) { - // fyi: this is never set to true. - css += fix; - next = css.length - 1; - } else { - throw input.error('Unclosed ' + what, line, start - offset, start); - } - } - - while (start < length) { - code = css.charCodeAt(start); - - if (code === t.newline) { - offset = start; - line += 1; - } - - switch (code) { - case t.space: - case t.tab: - case t.newline: - case t.cr: - case t.feed: - next = start; - - do { - next += 1; - code = css.charCodeAt(next); - - if (code === t.newline) { - offset = next; - line += 1; - } - } while (code === t.space || code === t.newline || code === t.tab || code === t.cr || code === t.feed); - - tokenType = t.space; - endLine = line; - endColumn = next - offset - 1; - end = next; - break; - - case t.plus: - case t.greaterThan: - case t.tilde: - case t.pipe: - next = start; - - do { - next += 1; - code = css.charCodeAt(next); - } while (code === t.plus || code === t.greaterThan || code === t.tilde || code === t.pipe); - - tokenType = t.combinator; - endLine = line; - endColumn = start - offset; - end = next; - break; - // Consume these characters as single tokens. - - case t.asterisk: - case t.ampersand: - case t.bang: - case t.comma: - case t.equals: - case t.dollar: - case t.caret: - case t.openSquare: - case t.closeSquare: - case t.colon: - case t.semicolon: - case t.openParenthesis: - case t.closeParenthesis: - next = start; - tokenType = code; - endLine = line; - endColumn = start - offset; - end = next + 1; - break; - - case t.singleQuote: - case t.doubleQuote: - quote = code === t.singleQuote ? 
"'" : '"'; - next = start; - - do { - escaped = false; - next = css.indexOf(quote, next + 1); - - if (next === -1) { - unclosed('quote', quote); - } - - escapePos = next; - - while (css.charCodeAt(escapePos - 1) === t.backslash) { - escapePos -= 1; - escaped = !escaped; - } - } while (escaped); - - tokenType = t.str; - endLine = line; - endColumn = start - offset; - end = next + 1; - break; - - default: - if (code === t.slash && css.charCodeAt(start + 1) === t.asterisk) { - next = css.indexOf('*/', start + 2) + 1; - - if (next === 0) { - unclosed('comment', '*/'); - } - - content = css.slice(start, next + 1); - lines = content.split('\n'); - last = lines.length - 1; - - if (last > 0) { - nextLine = line + last; - nextOffset = next - lines[last].length; - } else { - nextLine = line; - nextOffset = offset; - } - - tokenType = t.comment; - line = nextLine; - endLine = nextLine; - endColumn = next - nextOffset; - } else if (code === t.slash) { - next = start; - tokenType = code; - endLine = line; - endColumn = start - offset; - end = next + 1; - } else { - next = consumeWord(css, start); - tokenType = t.word; - endLine = line; - endColumn = next - offset; - } - - end = next + 1; - break; - } // Ensure that the token structure remains consistent - - - tokens.push([tokenType, // [0] Token type - line, // [1] Starting line - start - offset, // [2] Starting column - endLine, // [3] Ending line - endColumn, // [4] Ending column - start, // [5] Start position / Source index - end // [6] End position - ]); // Reset offset for the next token - - if (nextOffset) { - offset = nextOffset; - nextOffset = null; - } - - start = end; - } - - return tokens; - } -} (tokenize)); - -(function (module, exports) { - - exports.__esModule = true; - exports["default"] = void 0; - - var _root = _interopRequireDefault(rootExports); - - var _selector = _interopRequireDefault(selectorExports); - - var _className = _interopRequireDefault(classNameExports); - - var _comment = _interopRequireDefault(commentExports); - - var _id = _interopRequireDefault(idExports); - - var _tag = _interopRequireDefault(tagExports); - - var _string = _interopRequireDefault(stringExports); - - var _pseudo = _interopRequireDefault(pseudoExports); - - var _attribute = _interopRequireWildcard(attribute$1); - - var _universal = _interopRequireDefault(universalExports); - - var _combinator = _interopRequireDefault(combinatorExports); - - var _nesting = _interopRequireDefault(nestingExports); - - var _sortAscending = _interopRequireDefault(sortAscendingExports); - - var _tokenize = _interopRequireWildcard(tokenize); - - var tokens = _interopRequireWildcard(tokenTypes); - - var types$1 = _interopRequireWildcard(types); - - var _util = util; - - var _WHITESPACE_TOKENS, _Object$assign; - - function _getRequireWildcardCache() { if (typeof WeakMap !== "function") return null; var cache = new WeakMap(); _getRequireWildcardCache = function _getRequireWildcardCache() { return cache; }; return cache; } - - function _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } if (obj === null || typeof obj !== "object" && typeof obj !== "function") { return { "default": obj }; } var cache = _getRequireWildcardCache(); if (cache && cache.has(obj)) { return cache.get(obj); } var newObj = {}; var hasPropertyDescriptor = Object.defineProperty && Object.getOwnPropertyDescriptor; for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) { var desc = hasPropertyDescriptor ? 
Object.getOwnPropertyDescriptor(obj, key) : null; if (desc && (desc.get || desc.set)) { Object.defineProperty(newObj, key, desc); } else { newObj[key] = obj[key]; } } } newObj["default"] = obj; if (cache) { cache.set(obj, newObj); } return newObj; } - - function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { "default": obj }; } - - function _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } - - function _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; } - - var WHITESPACE_TOKENS = (_WHITESPACE_TOKENS = {}, _WHITESPACE_TOKENS[tokens.space] = true, _WHITESPACE_TOKENS[tokens.cr] = true, _WHITESPACE_TOKENS[tokens.feed] = true, _WHITESPACE_TOKENS[tokens.newline] = true, _WHITESPACE_TOKENS[tokens.tab] = true, _WHITESPACE_TOKENS); - var WHITESPACE_EQUIV_TOKENS = Object.assign({}, WHITESPACE_TOKENS, (_Object$assign = {}, _Object$assign[tokens.comment] = true, _Object$assign)); - - function tokenStart(token) { - return { - line: token[_tokenize.FIELDS.START_LINE], - column: token[_tokenize.FIELDS.START_COL] - }; - } - - function tokenEnd(token) { - return { - line: token[_tokenize.FIELDS.END_LINE], - column: token[_tokenize.FIELDS.END_COL] - }; - } - - function getSource(startLine, startColumn, endLine, endColumn) { - return { - start: { - line: startLine, - column: startColumn - }, - end: { - line: endLine, - column: endColumn - } - }; - } - - function getTokenSource(token) { - return getSource(token[_tokenize.FIELDS.START_LINE], token[_tokenize.FIELDS.START_COL], token[_tokenize.FIELDS.END_LINE], token[_tokenize.FIELDS.END_COL]); - } - - function getTokenSourceSpan(startToken, endToken) { - if (!startToken) { - return undefined; - } - - return getSource(startToken[_tokenize.FIELDS.START_LINE], startToken[_tokenize.FIELDS.START_COL], endToken[_tokenize.FIELDS.END_LINE], endToken[_tokenize.FIELDS.END_COL]); - } - - function unescapeProp(node, prop) { - var value = node[prop]; - - if (typeof value !== "string") { - return; - } - - if (value.indexOf("\\") !== -1) { - (0, _util.ensureObject)(node, 'raws'); - node[prop] = (0, _util.unesc)(value); - - if (node.raws[prop] === undefined) { - node.raws[prop] = value; - } - } - - return node; - } - - function indexesOf(array, item) { - var i = -1; - var indexes = []; - - while ((i = array.indexOf(item, i + 1)) !== -1) { - indexes.push(i); - } - - return indexes; - } - - function uniqs() { - var list = Array.prototype.concat.apply([], arguments); - return list.filter(function (item, i) { - return i === list.indexOf(item); - }); - } - - var Parser = /*#__PURE__*/function () { - function Parser(rule, options) { - if (options === void 0) { - options = {}; - } - - this.rule = rule; - this.options = Object.assign({ - lossy: false, - safe: false - }, options); - this.position = 0; - this.css = typeof this.rule === 'string' ? 
this.rule : this.rule.selector; - this.tokens = (0, _tokenize["default"])({ - css: this.css, - error: this._errorGenerator(), - safe: this.options.safe - }); - var rootSource = getTokenSourceSpan(this.tokens[0], this.tokens[this.tokens.length - 1]); - this.root = new _root["default"]({ - source: rootSource - }); - this.root.errorGenerator = this._errorGenerator(); - var selector = new _selector["default"]({ - source: { - start: { - line: 1, - column: 1 - } - } - }); - this.root.append(selector); - this.current = selector; - this.loop(); - } - - var _proto = Parser.prototype; - - _proto._errorGenerator = function _errorGenerator() { - var _this = this; - - return function (message, errorOptions) { - if (typeof _this.rule === 'string') { - return new Error(message); - } - - return _this.rule.error(message, errorOptions); - }; - }; - - _proto.attribute = function attribute() { - var attr = []; - var startingToken = this.currToken; - this.position++; - - while (this.position < this.tokens.length && this.currToken[_tokenize.FIELDS.TYPE] !== tokens.closeSquare) { - attr.push(this.currToken); - this.position++; - } - - if (this.currToken[_tokenize.FIELDS.TYPE] !== tokens.closeSquare) { - return this.expected('closing square bracket', this.currToken[_tokenize.FIELDS.START_POS]); - } - - var len = attr.length; - var node = { - source: getSource(startingToken[1], startingToken[2], this.currToken[3], this.currToken[4]), - sourceIndex: startingToken[_tokenize.FIELDS.START_POS] - }; - - if (len === 1 && !~[tokens.word].indexOf(attr[0][_tokenize.FIELDS.TYPE])) { - return this.expected('attribute', attr[0][_tokenize.FIELDS.START_POS]); - } - - var pos = 0; - var spaceBefore = ''; - var commentBefore = ''; - var lastAdded = null; - var spaceAfterMeaningfulToken = false; - - while (pos < len) { - var token = attr[pos]; - var content = this.content(token); - var next = attr[pos + 1]; - - switch (token[_tokenize.FIELDS.TYPE]) { - case tokens.space: - // if ( - // len === 1 || - // pos === 0 && this.content(next) === '|' - // ) { - // return this.expected('attribute', token[TOKEN.START_POS], content); - // } - spaceAfterMeaningfulToken = true; - - if (this.options.lossy) { - break; - } - - if (lastAdded) { - (0, _util.ensureObject)(node, 'spaces', lastAdded); - var prevContent = node.spaces[lastAdded].after || ''; - node.spaces[lastAdded].after = prevContent + content; - var existingComment = (0, _util.getProp)(node, 'raws', 'spaces', lastAdded, 'after') || null; - - if (existingComment) { - node.raws.spaces[lastAdded].after = existingComment + content; - } - } else { - spaceBefore = spaceBefore + content; - commentBefore = commentBefore + content; - } - - break; - - case tokens.asterisk: - if (next[_tokenize.FIELDS.TYPE] === tokens.equals) { - node.operator = content; - lastAdded = 'operator'; - } else if ((!node.namespace || lastAdded === "namespace" && !spaceAfterMeaningfulToken) && next) { - if (spaceBefore) { - (0, _util.ensureObject)(node, 'spaces', 'attribute'); - node.spaces.attribute.before = spaceBefore; - spaceBefore = ''; - } - - if (commentBefore) { - (0, _util.ensureObject)(node, 'raws', 'spaces', 'attribute'); - node.raws.spaces.attribute.before = spaceBefore; - commentBefore = ''; - } - - node.namespace = (node.namespace || "") + content; - var rawValue = (0, _util.getProp)(node, 'raws', 'namespace') || null; - - if (rawValue) { - node.raws.namespace += content; - } - - lastAdded = 'namespace'; - } - - spaceAfterMeaningfulToken = false; - break; - - case tokens.dollar: - if (lastAdded === 
"value") { - var oldRawValue = (0, _util.getProp)(node, 'raws', 'value'); - node.value += "$"; - - if (oldRawValue) { - node.raws.value = oldRawValue + "$"; - } - - break; - } - - // Falls through - - case tokens.caret: - if (next[_tokenize.FIELDS.TYPE] === tokens.equals) { - node.operator = content; - lastAdded = 'operator'; - } - - spaceAfterMeaningfulToken = false; - break; - - case tokens.combinator: - if (content === '~' && next[_tokenize.FIELDS.TYPE] === tokens.equals) { - node.operator = content; - lastAdded = 'operator'; - } - - if (content !== '|') { - spaceAfterMeaningfulToken = false; - break; - } - - if (next[_tokenize.FIELDS.TYPE] === tokens.equals) { - node.operator = content; - lastAdded = 'operator'; - } else if (!node.namespace && !node.attribute) { - node.namespace = true; - } - - spaceAfterMeaningfulToken = false; - break; - - case tokens.word: - if (next && this.content(next) === '|' && attr[pos + 2] && attr[pos + 2][_tokenize.FIELDS.TYPE] !== tokens.equals && // this look-ahead probably fails with comment nodes involved. - !node.operator && !node.namespace) { - node.namespace = content; - lastAdded = 'namespace'; - } else if (!node.attribute || lastAdded === "attribute" && !spaceAfterMeaningfulToken) { - if (spaceBefore) { - (0, _util.ensureObject)(node, 'spaces', 'attribute'); - node.spaces.attribute.before = spaceBefore; - spaceBefore = ''; - } - - if (commentBefore) { - (0, _util.ensureObject)(node, 'raws', 'spaces', 'attribute'); - node.raws.spaces.attribute.before = commentBefore; - commentBefore = ''; - } - - node.attribute = (node.attribute || "") + content; - - var _rawValue = (0, _util.getProp)(node, 'raws', 'attribute') || null; - - if (_rawValue) { - node.raws.attribute += content; - } - - lastAdded = 'attribute'; - } else if (!node.value && node.value !== "" || lastAdded === "value" && !(spaceAfterMeaningfulToken || node.quoteMark)) { - var _unescaped = (0, _util.unesc)(content); - - var _oldRawValue = (0, _util.getProp)(node, 'raws', 'value') || ''; - - var oldValue = node.value || ''; - node.value = oldValue + _unescaped; - node.quoteMark = null; - - if (_unescaped !== content || _oldRawValue) { - (0, _util.ensureObject)(node, 'raws'); - node.raws.value = (_oldRawValue || oldValue) + content; - } - - lastAdded = 'value'; - } else { - var insensitive = content === 'i' || content === "I"; - - if ((node.value || node.value === '') && (node.quoteMark || spaceAfterMeaningfulToken)) { - node.insensitive = insensitive; - - if (!insensitive || content === "I") { - (0, _util.ensureObject)(node, 'raws'); - node.raws.insensitiveFlag = content; - } - - lastAdded = 'insensitive'; - - if (spaceBefore) { - (0, _util.ensureObject)(node, 'spaces', 'insensitive'); - node.spaces.insensitive.before = spaceBefore; - spaceBefore = ''; - } - - if (commentBefore) { - (0, _util.ensureObject)(node, 'raws', 'spaces', 'insensitive'); - node.raws.spaces.insensitive.before = commentBefore; - commentBefore = ''; - } - } else if (node.value || node.value === '') { - lastAdded = 'value'; - node.value += content; - - if (node.raws.value) { - node.raws.value += content; - } - } - } - - spaceAfterMeaningfulToken = false; - break; - - case tokens.str: - if (!node.attribute || !node.operator) { - return this.error("Expected an attribute followed by an operator preceding the string.", { - index: token[_tokenize.FIELDS.START_POS] - }); - } - - var _unescapeValue = (0, _attribute.unescapeValue)(content), - unescaped = _unescapeValue.unescaped, - quoteMark = _unescapeValue.quoteMark; - - 
node.value = unescaped; - node.quoteMark = quoteMark; - lastAdded = 'value'; - (0, _util.ensureObject)(node, 'raws'); - node.raws.value = content; - spaceAfterMeaningfulToken = false; - break; - - case tokens.equals: - if (!node.attribute) { - return this.expected('attribute', token[_tokenize.FIELDS.START_POS], content); - } - - if (node.value) { - return this.error('Unexpected "=" found; an operator was already defined.', { - index: token[_tokenize.FIELDS.START_POS] - }); - } - - node.operator = node.operator ? node.operator + content : content; - lastAdded = 'operator'; - spaceAfterMeaningfulToken = false; - break; - - case tokens.comment: - if (lastAdded) { - if (spaceAfterMeaningfulToken || next && next[_tokenize.FIELDS.TYPE] === tokens.space || lastAdded === 'insensitive') { - var lastComment = (0, _util.getProp)(node, 'spaces', lastAdded, 'after') || ''; - var rawLastComment = (0, _util.getProp)(node, 'raws', 'spaces', lastAdded, 'after') || lastComment; - (0, _util.ensureObject)(node, 'raws', 'spaces', lastAdded); - node.raws.spaces[lastAdded].after = rawLastComment + content; - } else { - var lastValue = node[lastAdded] || ''; - var rawLastValue = (0, _util.getProp)(node, 'raws', lastAdded) || lastValue; - (0, _util.ensureObject)(node, 'raws'); - node.raws[lastAdded] = rawLastValue + content; - } - } else { - commentBefore = commentBefore + content; - } - - break; - - default: - return this.error("Unexpected \"" + content + "\" found.", { - index: token[_tokenize.FIELDS.START_POS] - }); - } - - pos++; - } - - unescapeProp(node, "attribute"); - unescapeProp(node, "namespace"); - this.newNode(new _attribute["default"](node)); - this.position++; - } - /** - * return a node containing meaningless garbage up to (but not including) the specified token position. - * if the token position is negative, all remaining tokens are consumed. - * - * This returns an array containing a single string node if all whitespace, - * otherwise an array of comment nodes with space before and after. - * - * These tokens are not added to the current selector, the caller can add them or use them to amend - * a previous node's space metadata. - * - * In lossy mode, this returns only comments. 
- */ - ; - - _proto.parseWhitespaceEquivalentTokens = function parseWhitespaceEquivalentTokens(stopPosition) { - if (stopPosition < 0) { - stopPosition = this.tokens.length; - } - - var startPosition = this.position; - var nodes = []; - var space = ""; - var lastComment = undefined; - - do { - if (WHITESPACE_TOKENS[this.currToken[_tokenize.FIELDS.TYPE]]) { - if (!this.options.lossy) { - space += this.content(); - } - } else if (this.currToken[_tokenize.FIELDS.TYPE] === tokens.comment) { - var spaces = {}; - - if (space) { - spaces.before = space; - space = ""; - } - - lastComment = new _comment["default"]({ - value: this.content(), - source: getTokenSource(this.currToken), - sourceIndex: this.currToken[_tokenize.FIELDS.START_POS], - spaces: spaces - }); - nodes.push(lastComment); - } - } while (++this.position < stopPosition); - - if (space) { - if (lastComment) { - lastComment.spaces.after = space; - } else if (!this.options.lossy) { - var firstToken = this.tokens[startPosition]; - var lastToken = this.tokens[this.position - 1]; - nodes.push(new _string["default"]({ - value: '', - source: getSource(firstToken[_tokenize.FIELDS.START_LINE], firstToken[_tokenize.FIELDS.START_COL], lastToken[_tokenize.FIELDS.END_LINE], lastToken[_tokenize.FIELDS.END_COL]), - sourceIndex: firstToken[_tokenize.FIELDS.START_POS], - spaces: { - before: space, - after: '' - } - })); - } - } - - return nodes; - } - /** - * - * @param {*} nodes - */ - ; - - _proto.convertWhitespaceNodesToSpace = function convertWhitespaceNodesToSpace(nodes, requiredSpace) { - var _this2 = this; - - if (requiredSpace === void 0) { - requiredSpace = false; - } - - var space = ""; - var rawSpace = ""; - nodes.forEach(function (n) { - var spaceBefore = _this2.lossySpace(n.spaces.before, requiredSpace); - - var rawSpaceBefore = _this2.lossySpace(n.rawSpaceBefore, requiredSpace); - - space += spaceBefore + _this2.lossySpace(n.spaces.after, requiredSpace && spaceBefore.length === 0); - rawSpace += spaceBefore + n.value + _this2.lossySpace(n.rawSpaceAfter, requiredSpace && rawSpaceBefore.length === 0); - }); - - if (rawSpace === space) { - rawSpace = undefined; - } - - var result = { - space: space, - rawSpace: rawSpace - }; - return result; - }; - - _proto.isNamedCombinator = function isNamedCombinator(position) { - if (position === void 0) { - position = this.position; - } - - return this.tokens[position + 0] && this.tokens[position + 0][_tokenize.FIELDS.TYPE] === tokens.slash && this.tokens[position + 1] && this.tokens[position + 1][_tokenize.FIELDS.TYPE] === tokens.word && this.tokens[position + 2] && this.tokens[position + 2][_tokenize.FIELDS.TYPE] === tokens.slash; - }; - - _proto.namedCombinator = function namedCombinator() { - if (this.isNamedCombinator()) { - var nameRaw = this.content(this.tokens[this.position + 1]); - var name = (0, _util.unesc)(nameRaw).toLowerCase(); - var raws = {}; - - if (name !== nameRaw) { - raws.value = "/" + nameRaw + "/"; - } - - var node = new _combinator["default"]({ - value: "/" + name + "/", - source: getSource(this.currToken[_tokenize.FIELDS.START_LINE], this.currToken[_tokenize.FIELDS.START_COL], this.tokens[this.position + 2][_tokenize.FIELDS.END_LINE], this.tokens[this.position + 2][_tokenize.FIELDS.END_COL]), - sourceIndex: this.currToken[_tokenize.FIELDS.START_POS], - raws: raws - }); - this.position = this.position + 3; - return node; - } else { - this.unexpected(); - } - }; - - _proto.combinator = function combinator() { - var _this3 = this; - - if (this.content() === '|') { - return 
this.namespace(); - } // We need to decide between a space that's a descendant combinator and meaningless whitespace at the end of a selector. - - - var nextSigTokenPos = this.locateNextMeaningfulToken(this.position); - - if (nextSigTokenPos < 0 || this.tokens[nextSigTokenPos][_tokenize.FIELDS.TYPE] === tokens.comma) { - var nodes = this.parseWhitespaceEquivalentTokens(nextSigTokenPos); - - if (nodes.length > 0) { - var last = this.current.last; - - if (last) { - var _this$convertWhitespa = this.convertWhitespaceNodesToSpace(nodes), - space = _this$convertWhitespa.space, - rawSpace = _this$convertWhitespa.rawSpace; - - if (rawSpace !== undefined) { - last.rawSpaceAfter += rawSpace; - } - - last.spaces.after += space; - } else { - nodes.forEach(function (n) { - return _this3.newNode(n); - }); - } - } - - return; - } - - var firstToken = this.currToken; - var spaceOrDescendantSelectorNodes = undefined; - - if (nextSigTokenPos > this.position) { - spaceOrDescendantSelectorNodes = this.parseWhitespaceEquivalentTokens(nextSigTokenPos); - } - - var node; - - if (this.isNamedCombinator()) { - node = this.namedCombinator(); - } else if (this.currToken[_tokenize.FIELDS.TYPE] === tokens.combinator) { - node = new _combinator["default"]({ - value: this.content(), - source: getTokenSource(this.currToken), - sourceIndex: this.currToken[_tokenize.FIELDS.START_POS] - }); - this.position++; - } else if (WHITESPACE_TOKENS[this.currToken[_tokenize.FIELDS.TYPE]]) ; else if (!spaceOrDescendantSelectorNodes) { - this.unexpected(); - } - - if (node) { - if (spaceOrDescendantSelectorNodes) { - var _this$convertWhitespa2 = this.convertWhitespaceNodesToSpace(spaceOrDescendantSelectorNodes), - _space = _this$convertWhitespa2.space, - _rawSpace = _this$convertWhitespa2.rawSpace; - - node.spaces.before = _space; - node.rawSpaceBefore = _rawSpace; - } - } else { - // descendant combinator - var _this$convertWhitespa3 = this.convertWhitespaceNodesToSpace(spaceOrDescendantSelectorNodes, true), - _space2 = _this$convertWhitespa3.space, - _rawSpace2 = _this$convertWhitespa3.rawSpace; - - if (!_rawSpace2) { - _rawSpace2 = _space2; - } - - var spaces = {}; - var raws = { - spaces: {} - }; - - if (_space2.endsWith(' ') && _rawSpace2.endsWith(' ')) { - spaces.before = _space2.slice(0, _space2.length - 1); - raws.spaces.before = _rawSpace2.slice(0, _rawSpace2.length - 1); - } else if (_space2.startsWith(' ') && _rawSpace2.startsWith(' ')) { - spaces.after = _space2.slice(1); - raws.spaces.after = _rawSpace2.slice(1); - } else { - raws.value = _rawSpace2; - } - - node = new _combinator["default"]({ - value: ' ', - source: getTokenSourceSpan(firstToken, this.tokens[this.position - 1]), - sourceIndex: firstToken[_tokenize.FIELDS.START_POS], - spaces: spaces, - raws: raws - }); - } - - if (this.currToken && this.currToken[_tokenize.FIELDS.TYPE] === tokens.space) { - node.spaces.after = this.optionalSpace(this.content()); - this.position++; - } - - return this.newNode(node); - }; - - _proto.comma = function comma() { - if (this.position === this.tokens.length - 1) { - this.root.trailingComma = true; - this.position++; - return; - } - - this.current._inferEndPosition(); - - var selector = new _selector["default"]({ - source: { - start: tokenStart(this.tokens[this.position + 1]) - } - }); - this.current.parent.append(selector); - this.current = selector; - this.position++; - }; - - _proto.comment = function comment() { - var current = this.currToken; - this.newNode(new _comment["default"]({ - value: this.content(), - source: 
getTokenSource(current), - sourceIndex: current[_tokenize.FIELDS.START_POS] - })); - this.position++; - }; - - _proto.error = function error(message, opts) { - throw this.root.error(message, opts); - }; - - _proto.missingBackslash = function missingBackslash() { - return this.error('Expected a backslash preceding the semicolon.', { - index: this.currToken[_tokenize.FIELDS.START_POS] - }); - }; - - _proto.missingParenthesis = function missingParenthesis() { - return this.expected('opening parenthesis', this.currToken[_tokenize.FIELDS.START_POS]); - }; - - _proto.missingSquareBracket = function missingSquareBracket() { - return this.expected('opening square bracket', this.currToken[_tokenize.FIELDS.START_POS]); - }; - - _proto.unexpected = function unexpected() { - return this.error("Unexpected '" + this.content() + "'. Escaping special characters with \\ may help.", this.currToken[_tokenize.FIELDS.START_POS]); - }; - - _proto.namespace = function namespace() { - var before = this.prevToken && this.content(this.prevToken) || true; - - if (this.nextToken[_tokenize.FIELDS.TYPE] === tokens.word) { - this.position++; - return this.word(before); - } else if (this.nextToken[_tokenize.FIELDS.TYPE] === tokens.asterisk) { - this.position++; - return this.universal(before); - } - }; - - _proto.nesting = function nesting() { - if (this.nextToken) { - var nextContent = this.content(this.nextToken); - - if (nextContent === "|") { - this.position++; - return; - } - } - - var current = this.currToken; - this.newNode(new _nesting["default"]({ - value: this.content(), - source: getTokenSource(current), - sourceIndex: current[_tokenize.FIELDS.START_POS] - })); - this.position++; - }; - - _proto.parentheses = function parentheses() { - var last = this.current.last; - var unbalanced = 1; - this.position++; - - if (last && last.type === types$1.PSEUDO) { - var selector = new _selector["default"]({ - source: { - start: tokenStart(this.tokens[this.position - 1]) - } - }); - var cache = this.current; - last.append(selector); - this.current = selector; - - while (this.position < this.tokens.length && unbalanced) { - if (this.currToken[_tokenize.FIELDS.TYPE] === tokens.openParenthesis) { - unbalanced++; - } - - if (this.currToken[_tokenize.FIELDS.TYPE] === tokens.closeParenthesis) { - unbalanced--; - } - - if (unbalanced) { - this.parse(); - } else { - this.current.source.end = tokenEnd(this.currToken); - this.current.parent.source.end = tokenEnd(this.currToken); - this.position++; - } - } - - this.current = cache; - } else { - // I think this case should be an error. It's used to implement a basic parse of media queries - // but I don't think it's a good idea. 
- var parenStart = this.currToken; - var parenValue = "("; - var parenEnd; - - while (this.position < this.tokens.length && unbalanced) { - if (this.currToken[_tokenize.FIELDS.TYPE] === tokens.openParenthesis) { - unbalanced++; - } - - if (this.currToken[_tokenize.FIELDS.TYPE] === tokens.closeParenthesis) { - unbalanced--; - } - - parenEnd = this.currToken; - parenValue += this.parseParenthesisToken(this.currToken); - this.position++; - } - - if (last) { - last.appendToPropertyAndEscape("value", parenValue, parenValue); - } else { - this.newNode(new _string["default"]({ - value: parenValue, - source: getSource(parenStart[_tokenize.FIELDS.START_LINE], parenStart[_tokenize.FIELDS.START_COL], parenEnd[_tokenize.FIELDS.END_LINE], parenEnd[_tokenize.FIELDS.END_COL]), - sourceIndex: parenStart[_tokenize.FIELDS.START_POS] - })); - } - } - - if (unbalanced) { - return this.expected('closing parenthesis', this.currToken[_tokenize.FIELDS.START_POS]); - } - }; - - _proto.pseudo = function pseudo() { - var _this4 = this; - - var pseudoStr = ''; - var startingToken = this.currToken; - - while (this.currToken && this.currToken[_tokenize.FIELDS.TYPE] === tokens.colon) { - pseudoStr += this.content(); - this.position++; - } - - if (!this.currToken) { - return this.expected(['pseudo-class', 'pseudo-element'], this.position - 1); - } - - if (this.currToken[_tokenize.FIELDS.TYPE] === tokens.word) { - this.splitWord(false, function (first, length) { - pseudoStr += first; - - _this4.newNode(new _pseudo["default"]({ - value: pseudoStr, - source: getTokenSourceSpan(startingToken, _this4.currToken), - sourceIndex: startingToken[_tokenize.FIELDS.START_POS] - })); - - if (length > 1 && _this4.nextToken && _this4.nextToken[_tokenize.FIELDS.TYPE] === tokens.openParenthesis) { - _this4.error('Misplaced parenthesis.', { - index: _this4.nextToken[_tokenize.FIELDS.START_POS] - }); - } - }); - } else { - return this.expected(['pseudo-class', 'pseudo-element'], this.currToken[_tokenize.FIELDS.START_POS]); - } - }; - - _proto.space = function space() { - var content = this.content(); // Handle space before and after the selector - - if (this.position === 0 || this.prevToken[_tokenize.FIELDS.TYPE] === tokens.comma || this.prevToken[_tokenize.FIELDS.TYPE] === tokens.openParenthesis || this.current.nodes.every(function (node) { - return node.type === 'comment'; - })) { - this.spaces = this.optionalSpace(content); - this.position++; - } else if (this.position === this.tokens.length - 1 || this.nextToken[_tokenize.FIELDS.TYPE] === tokens.comma || this.nextToken[_tokenize.FIELDS.TYPE] === tokens.closeParenthesis) { - this.current.last.spaces.after = this.optionalSpace(content); - this.position++; - } else { - this.combinator(); - } - }; - - _proto.string = function string() { - var current = this.currToken; - this.newNode(new _string["default"]({ - value: this.content(), - source: getTokenSource(current), - sourceIndex: current[_tokenize.FIELDS.START_POS] - })); - this.position++; - }; - - _proto.universal = function universal(namespace) { - var nextToken = this.nextToken; - - if (nextToken && this.content(nextToken) === '|') { - this.position++; - return this.namespace(); - } - - var current = this.currToken; - this.newNode(new _universal["default"]({ - value: this.content(), - source: getTokenSource(current), - sourceIndex: current[_tokenize.FIELDS.START_POS] - }), namespace); - this.position++; - }; - - _proto.splitWord = function splitWord(namespace, firstCallback) { - var _this5 = this; - - var nextToken = this.nextToken; - 
var word = this.content(); - - while (nextToken && ~[tokens.dollar, tokens.caret, tokens.equals, tokens.word].indexOf(nextToken[_tokenize.FIELDS.TYPE])) { - this.position++; - var current = this.content(); - word += current; - - if (current.lastIndexOf('\\') === current.length - 1) { - var next = this.nextToken; - - if (next && next[_tokenize.FIELDS.TYPE] === tokens.space) { - word += this.requiredSpace(this.content(next)); - this.position++; - } - } - - nextToken = this.nextToken; - } - - var hasClass = indexesOf(word, '.').filter(function (i) { - // Allow escaped dot within class name - var escapedDot = word[i - 1] === '\\'; // Allow decimal numbers percent in @keyframes - - var isKeyframesPercent = /^\d+\.\d+%$/.test(word); - return !escapedDot && !isKeyframesPercent; - }); - var hasId = indexesOf(word, '#').filter(function (i) { - return word[i - 1] !== '\\'; - }); // Eliminate Sass interpolations from the list of id indexes - - var interpolations = indexesOf(word, '#{'); - - if (interpolations.length) { - hasId = hasId.filter(function (hashIndex) { - return !~interpolations.indexOf(hashIndex); - }); - } - - var indices = (0, _sortAscending["default"])(uniqs([0].concat(hasClass, hasId))); - indices.forEach(function (ind, i) { - var index = indices[i + 1] || word.length; - var value = word.slice(ind, index); - - if (i === 0 && firstCallback) { - return firstCallback.call(_this5, value, indices.length); - } - - var node; - var current = _this5.currToken; - var sourceIndex = current[_tokenize.FIELDS.START_POS] + indices[i]; - var source = getSource(current[1], current[2] + ind, current[3], current[2] + (index - 1)); - - if (~hasClass.indexOf(ind)) { - var classNameOpts = { - value: value.slice(1), - source: source, - sourceIndex: sourceIndex - }; - node = new _className["default"](unescapeProp(classNameOpts, "value")); - } else if (~hasId.indexOf(ind)) { - var idOpts = { - value: value.slice(1), - source: source, - sourceIndex: sourceIndex - }; - node = new _id["default"](unescapeProp(idOpts, "value")); - } else { - var tagOpts = { - value: value, - source: source, - sourceIndex: sourceIndex - }; - unescapeProp(tagOpts, "value"); - node = new _tag["default"](tagOpts); - } - - _this5.newNode(node, namespace); // Ensure that the namespace is used only once - - - namespace = null; - }); - this.position++; - }; - - _proto.word = function word(namespace) { - var nextToken = this.nextToken; - - if (nextToken && this.content(nextToken) === '|') { - this.position++; - return this.namespace(); - } - - return this.splitWord(namespace); - }; - - _proto.loop = function loop() { - while (this.position < this.tokens.length) { - this.parse(true); - } - - this.current._inferEndPosition(); - - return this.root; - }; - - _proto.parse = function parse(throwOnParenthesis) { - switch (this.currToken[_tokenize.FIELDS.TYPE]) { - case tokens.space: - this.space(); - break; - - case tokens.comment: - this.comment(); - break; - - case tokens.openParenthesis: - this.parentheses(); - break; - - case tokens.closeParenthesis: - if (throwOnParenthesis) { - this.missingParenthesis(); - } - - break; - - case tokens.openSquare: - this.attribute(); - break; - - case tokens.dollar: - case tokens.caret: - case tokens.equals: - case tokens.word: - this.word(); - break; - - case tokens.colon: - this.pseudo(); - break; - - case tokens.comma: - this.comma(); - break; - - case tokens.asterisk: - this.universal(); - break; - - case tokens.ampersand: - this.nesting(); - break; - - case tokens.slash: - case tokens.combinator: - 
this.combinator(); - break; - - case tokens.str: - this.string(); - break; - // These cases throw; no break needed. - - case tokens.closeSquare: - this.missingSquareBracket(); - - case tokens.semicolon: - this.missingBackslash(); - - default: - this.unexpected(); - } - } - /** - * Helpers - */ - ; - - _proto.expected = function expected(description, index, found) { - if (Array.isArray(description)) { - var last = description.pop(); - description = description.join(', ') + " or " + last; - } - - var an = /^[aeiou]/.test(description[0]) ? 'an' : 'a'; - - if (!found) { - return this.error("Expected " + an + " " + description + ".", { - index: index - }); - } - - return this.error("Expected " + an + " " + description + ", found \"" + found + "\" instead.", { - index: index - }); - }; - - _proto.requiredSpace = function requiredSpace(space) { - return this.options.lossy ? ' ' : space; - }; - - _proto.optionalSpace = function optionalSpace(space) { - return this.options.lossy ? '' : space; - }; - - _proto.lossySpace = function lossySpace(space, required) { - if (this.options.lossy) { - return required ? ' ' : ''; - } else { - return space; - } - }; - - _proto.parseParenthesisToken = function parseParenthesisToken(token) { - var content = this.content(token); - - if (token[_tokenize.FIELDS.TYPE] === tokens.space) { - return this.requiredSpace(content); - } else { - return content; - } - }; - - _proto.newNode = function newNode(node, namespace) { - if (namespace) { - if (/^ +$/.test(namespace)) { - if (!this.options.lossy) { - this.spaces = (this.spaces || '') + namespace; - } - - namespace = true; - } - - node.namespace = namespace; - unescapeProp(node, "namespace"); - } - - if (this.spaces) { - node.spaces.before = this.spaces; - this.spaces = ''; - } - - return this.current.append(node); - }; - - _proto.content = function content(token) { - if (token === void 0) { - token = this.currToken; - } - - return this.css.slice(token[_tokenize.FIELDS.START_POS], token[_tokenize.FIELDS.END_POS]); - }; - - /** - * returns the index of the next non-whitespace, non-comment token. - * returns -1 if no meaningful token is found. - */ - _proto.locateNextMeaningfulToken = function locateNextMeaningfulToken(startPosition) { - if (startPosition === void 0) { - startPosition = this.position + 1; - } - - var searchPosition = startPosition; - - while (searchPosition < this.tokens.length) { - if (WHITESPACE_EQUIV_TOKENS[this.tokens[searchPosition][_tokenize.FIELDS.TYPE]]) { - searchPosition++; - continue; - } else { - return searchPosition; - } - } - - return -1; - }; - - _createClass(Parser, [{ - key: "currToken", - get: function get() { - return this.tokens[this.position]; - } - }, { - key: "nextToken", - get: function get() { - return this.tokens[this.position + 1]; - } - }, { - key: "prevToken", - get: function get() { - return this.tokens[this.position - 1]; - } - }]); - - return Parser; - }(); - - exports["default"] = Parser; - module.exports = exports.default; -} (parser, parser.exports)); - -var parserExports = parser.exports; - -(function (module, exports) { - - exports.__esModule = true; - exports["default"] = void 0; - - var _parser = _interopRequireDefault(parserExports); - - function _interopRequireDefault(obj) { return obj && obj.__esModule ? 
obj : { "default": obj }; } - - var Processor = /*#__PURE__*/function () { - function Processor(func, options) { - this.func = func || function noop() {}; - - this.funcRes = null; - this.options = options; - } - - var _proto = Processor.prototype; - - _proto._shouldUpdateSelector = function _shouldUpdateSelector(rule, options) { - if (options === void 0) { - options = {}; - } - - var merged = Object.assign({}, this.options, options); - - if (merged.updateSelector === false) { - return false; - } else { - return typeof rule !== "string"; - } - }; - - _proto._isLossy = function _isLossy(options) { - if (options === void 0) { - options = {}; - } - - var merged = Object.assign({}, this.options, options); - - if (merged.lossless === false) { - return true; - } else { - return false; - } - }; - - _proto._root = function _root(rule, options) { - if (options === void 0) { - options = {}; - } - - var parser = new _parser["default"](rule, this._parseOptions(options)); - return parser.root; - }; - - _proto._parseOptions = function _parseOptions(options) { - return { - lossy: this._isLossy(options) - }; - }; - - _proto._run = function _run(rule, options) { - var _this = this; - - if (options === void 0) { - options = {}; - } - - return new Promise(function (resolve, reject) { - try { - var root = _this._root(rule, options); - - Promise.resolve(_this.func(root)).then(function (transform) { - var string = undefined; - - if (_this._shouldUpdateSelector(rule, options)) { - string = root.toString(); - rule.selector = string; - } - - return { - transform: transform, - root: root, - string: string - }; - }).then(resolve, reject); - } catch (e) { - reject(e); - return; - } - }); - }; - - _proto._runSync = function _runSync(rule, options) { - if (options === void 0) { - options = {}; - } - - var root = this._root(rule, options); - - var transform = this.func(root); - - if (transform && typeof transform.then === "function") { - throw new Error("Selector processor returned a promise to a synchronous call."); - } - - var string = undefined; - - if (options.updateSelector && typeof rule !== "string") { - string = root.toString(); - rule.selector = string; - } - - return { - transform: transform, - root: root, - string: string - }; - } - /** - * Process rule into a selector AST. - * - * @param rule {postcss.Rule | string} The css selector to be processed - * @param options The options for processing - * @returns {Promise} The AST of the selector after processing it. - */ - ; - - _proto.ast = function ast(rule, options) { - return this._run(rule, options).then(function (result) { - return result.root; - }); - } - /** - * Process rule into a selector AST synchronously. - * - * @param rule {postcss.Rule | string} The css selector to be processed - * @param options The options for processing - * @returns {parser.Root} The AST of the selector after processing it. - */ - ; - - _proto.astSync = function astSync(rule, options) { - return this._runSync(rule, options).root; - } - /** - * Process a selector into a transformed value asynchronously - * - * @param rule {postcss.Rule | string} The css selector to be processed - * @param options The options for processing - * @returns {Promise} The value returned by the processor. - */ - ; - - _proto.transform = function transform(rule, options) { - return this._run(rule, options).then(function (result) { - return result.transform; - }); - } - /** - * Process a selector into a transformed value synchronously. 
- * - * @param rule {postcss.Rule | string} The css selector to be processed - * @param options The options for processing - * @returns {any} The value returned by the processor. - */ - ; - - _proto.transformSync = function transformSync(rule, options) { - return this._runSync(rule, options).transform; - } - /** - * Process a selector into a new selector string asynchronously. - * - * @param rule {postcss.Rule | string} The css selector to be processed - * @param options The options for processing - * @returns {string} the selector after processing. - */ - ; - - _proto.process = function process(rule, options) { - return this._run(rule, options).then(function (result) { - return result.string || result.root.toString(); - }); - } - /** - * Process a selector into a new selector string synchronously. - * - * @param rule {postcss.Rule | string} The css selector to be processed - * @param options The options for processing - * @returns {string} the selector after processing. - */ - ; - - _proto.processSync = function processSync(rule, options) { - var result = this._runSync(rule, options); - - return result.string || result.root.toString(); - }; - - return Processor; - }(); - - exports["default"] = Processor; - module.exports = exports.default; -} (processor, processor.exports)); - -var processorExports = processor.exports; - -var selectors = {}; - -var constructors = {}; - -constructors.__esModule = true; -constructors.universal = constructors.tag = constructors.string = constructors.selector = constructors.root = constructors.pseudo = constructors.nesting = constructors.id = constructors.comment = constructors.combinator = constructors.className = constructors.attribute = void 0; - -var _attribute = _interopRequireDefault$2(attribute$1); - -var _className = _interopRequireDefault$2(classNameExports); - -var _combinator = _interopRequireDefault$2(combinatorExports); - -var _comment = _interopRequireDefault$2(commentExports); - -var _id = _interopRequireDefault$2(idExports); - -var _nesting = _interopRequireDefault$2(nestingExports); - -var _pseudo = _interopRequireDefault$2(pseudoExports); - -var _root = _interopRequireDefault$2(rootExports); - -var _selector = _interopRequireDefault$2(selectorExports); - -var _string = _interopRequireDefault$2(stringExports); - -var _tag = _interopRequireDefault$2(tagExports); - -var _universal = _interopRequireDefault$2(universalExports); - -function _interopRequireDefault$2(obj) { return obj && obj.__esModule ? 
obj : { "default": obj }; } - -var attribute = function attribute(opts) { - return new _attribute["default"](opts); -}; - -constructors.attribute = attribute; - -var className = function className(opts) { - return new _className["default"](opts); -}; - -constructors.className = className; - -var combinator = function combinator(opts) { - return new _combinator["default"](opts); -}; - -constructors.combinator = combinator; - -var comment = function comment(opts) { - return new _comment["default"](opts); -}; - -constructors.comment = comment; - -var id = function id(opts) { - return new _id["default"](opts); -}; - -constructors.id = id; - -var nesting = function nesting(opts) { - return new _nesting["default"](opts); -}; - -constructors.nesting = nesting; - -var pseudo = function pseudo(opts) { - return new _pseudo["default"](opts); -}; - -constructors.pseudo = pseudo; - -var root = function root(opts) { - return new _root["default"](opts); -}; - -constructors.root = root; - -var selector = function selector(opts) { - return new _selector["default"](opts); -}; - -constructors.selector = selector; - -var string = function string(opts) { - return new _string["default"](opts); -}; - -constructors.string = string; - -var tag = function tag(opts) { - return new _tag["default"](opts); -}; - -constructors.tag = tag; - -var universal = function universal(opts) { - return new _universal["default"](opts); -}; - -constructors.universal = universal; - -var guards = {}; - -guards.__esModule = true; -guards.isNode = isNode; -guards.isPseudoElement = isPseudoElement; -guards.isPseudoClass = isPseudoClass; -guards.isContainer = isContainer; -guards.isNamespace = isNamespace; -guards.isUniversal = guards.isTag = guards.isString = guards.isSelector = guards.isRoot = guards.isPseudo = guards.isNesting = guards.isIdentifier = guards.isComment = guards.isCombinator = guards.isClassName = guards.isAttribute = void 0; - -var _types = types; - -var _IS_TYPE; - -var IS_TYPE = (_IS_TYPE = {}, _IS_TYPE[_types.ATTRIBUTE] = true, _IS_TYPE[_types.CLASS] = true, _IS_TYPE[_types.COMBINATOR] = true, _IS_TYPE[_types.COMMENT] = true, _IS_TYPE[_types.ID] = true, _IS_TYPE[_types.NESTING] = true, _IS_TYPE[_types.PSEUDO] = true, _IS_TYPE[_types.ROOT] = true, _IS_TYPE[_types.SELECTOR] = true, _IS_TYPE[_types.STRING] = true, _IS_TYPE[_types.TAG] = true, _IS_TYPE[_types.UNIVERSAL] = true, _IS_TYPE); - -function isNode(node) { - return typeof node === "object" && IS_TYPE[node.type]; -} - -function isNodeType(type, node) { - return isNode(node) && node.type === type; -} - -var isAttribute = isNodeType.bind(null, _types.ATTRIBUTE); -guards.isAttribute = isAttribute; -var isClassName = isNodeType.bind(null, _types.CLASS); -guards.isClassName = isClassName; -var isCombinator = isNodeType.bind(null, _types.COMBINATOR); -guards.isCombinator = isCombinator; -var isComment = isNodeType.bind(null, _types.COMMENT); -guards.isComment = isComment; -var isIdentifier = isNodeType.bind(null, _types.ID); -guards.isIdentifier = isIdentifier; -var isNesting = isNodeType.bind(null, _types.NESTING); -guards.isNesting = isNesting; -var isPseudo = isNodeType.bind(null, _types.PSEUDO); -guards.isPseudo = isPseudo; -var isRoot = isNodeType.bind(null, _types.ROOT); -guards.isRoot = isRoot; -var isSelector = isNodeType.bind(null, _types.SELECTOR); -guards.isSelector = isSelector; -var isString = isNodeType.bind(null, _types.STRING); -guards.isString = isString; -var isTag = isNodeType.bind(null, _types.TAG); -guards.isTag = isTag; -var isUniversal = 
isNodeType.bind(null, _types.UNIVERSAL); -guards.isUniversal = isUniversal; - -function isPseudoElement(node) { - return isPseudo(node) && node.value && (node.value.startsWith("::") || node.value.toLowerCase() === ":before" || node.value.toLowerCase() === ":after" || node.value.toLowerCase() === ":first-letter" || node.value.toLowerCase() === ":first-line"); -} - -function isPseudoClass(node) { - return isPseudo(node) && !isPseudoElement(node); -} - -function isContainer(node) { - return !!(isNode(node) && node.walk); -} - -function isNamespace(node) { - return isAttribute(node) || isTag(node); -} - -(function (exports) { - - exports.__esModule = true; - - var _types = types; - - Object.keys(_types).forEach(function (key) { - if (key === "default" || key === "__esModule") return; - if (key in exports && exports[key] === _types[key]) return; - exports[key] = _types[key]; - }); - - var _constructors = constructors; - - Object.keys(_constructors).forEach(function (key) { - if (key === "default" || key === "__esModule") return; - if (key in exports && exports[key] === _constructors[key]) return; - exports[key] = _constructors[key]; - }); - - var _guards = guards; - - Object.keys(_guards).forEach(function (key) { - if (key === "default" || key === "__esModule") return; - if (key in exports && exports[key] === _guards[key]) return; - exports[key] = _guards[key]; - }); -} (selectors)); - -(function (module, exports) { - - exports.__esModule = true; - exports["default"] = void 0; - - var _processor = _interopRequireDefault(processorExports); - - var selectors$1 = _interopRequireWildcard(selectors); - - function _getRequireWildcardCache() { if (typeof WeakMap !== "function") return null; var cache = new WeakMap(); _getRequireWildcardCache = function _getRequireWildcardCache() { return cache; }; return cache; } - - function _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } if (obj === null || typeof obj !== "object" && typeof obj !== "function") { return { "default": obj }; } var cache = _getRequireWildcardCache(); if (cache && cache.has(obj)) { return cache.get(obj); } var newObj = {}; var hasPropertyDescriptor = Object.defineProperty && Object.getOwnPropertyDescriptor; for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) { var desc = hasPropertyDescriptor ? Object.getOwnPropertyDescriptor(obj, key) : null; if (desc && (desc.get || desc.set)) { Object.defineProperty(newObj, key, desc); } else { newObj[key] = obj[key]; } } } newObj["default"] = obj; if (cache) { cache.set(obj, newObj); } return newObj; } - - function _interopRequireDefault(obj) { return obj && obj.__esModule ? 
obj : { "default": obj }; } - - var parser = function parser(processor) { - return new _processor["default"](processor); - }; - - Object.assign(parser, selectors$1); - delete parser.__esModule; - var _default = parser; - exports["default"] = _default; - module.exports = exports.default; -} (dist, dist.exports)); - -var distExports = dist.exports; - -const selectorParser$1 = distExports; -const valueParser = lib; -const { extractICSS } = src$4; - -const isSpacing = (node) => node.type === "combinator" && node.value === " "; - -function normalizeNodeArray(nodes) { - const array = []; - - nodes.forEach((x) => { - if (Array.isArray(x)) { - normalizeNodeArray(x).forEach((item) => { - array.push(item); - }); - } else if (x) { - array.push(x); - } - }); - - if (array.length > 0 && isSpacing(array[array.length - 1])) { - array.pop(); - } - return array; -} - -function localizeNode(rule, mode, localAliasMap) { - const transform = (node, context) => { - if (context.ignoreNextSpacing && !isSpacing(node)) { - throw new Error("Missing whitespace after " + context.ignoreNextSpacing); - } - - if (context.enforceNoSpacing && isSpacing(node)) { - throw new Error("Missing whitespace before " + context.enforceNoSpacing); - } - - let newNodes; - - switch (node.type) { - case "root": { - let resultingGlobal; - - context.hasPureGlobals = false; - - newNodes = node.nodes.map((n) => { - const nContext = { - global: context.global, - lastWasSpacing: true, - hasLocals: false, - explicit: false, - }; - - n = transform(n, nContext); - - if (typeof resultingGlobal === "undefined") { - resultingGlobal = nContext.global; - } else if (resultingGlobal !== nContext.global) { - throw new Error( - 'Inconsistent rule global/local result in rule "' + - node + - '" (multiple selectors must result in the same mode for the rule)' - ); - } - - if (!nContext.hasLocals) { - context.hasPureGlobals = true; - } - - return n; - }); - - context.global = resultingGlobal; - - node.nodes = normalizeNodeArray(newNodes); - break; - } - case "selector": { - newNodes = node.map((childNode) => transform(childNode, context)); - - node = node.clone(); - node.nodes = normalizeNodeArray(newNodes); - break; - } - case "combinator": { - if (isSpacing(node)) { - if (context.ignoreNextSpacing) { - context.ignoreNextSpacing = false; - context.lastWasSpacing = false; - context.enforceNoSpacing = false; - return null; - } - context.lastWasSpacing = true; - return node; - } - break; - } - case "pseudo": { - let childContext; - const isNested = !!node.length; - const isScoped = node.value === ":local" || node.value === ":global"; - const isImportExport = - node.value === ":import" || node.value === ":export"; - - if (isImportExport) { - context.hasLocals = true; - // :local(.foo) - } else if (isNested) { - if (isScoped) { - if (node.nodes.length === 0) { - throw new Error(`${node.value}() can't be empty`); - } - - if (context.inside) { - throw new Error( - `A ${node.value} is not allowed inside of a ${context.inside}(...)` - ); - } - - childContext = { - global: node.value === ":global", - inside: node.value, - hasLocals: false, - explicit: true, - }; - - newNodes = node - .map((childNode) => transform(childNode, childContext)) - .reduce((acc, next) => acc.concat(next.nodes), []); - - if (newNodes.length) { - const { before, after } = node.spaces; - - const first = newNodes[0]; - const last = newNodes[newNodes.length - 1]; - - first.spaces = { before, after: first.spaces.after }; - last.spaces = { before: last.spaces.before, after }; - } - - node = newNodes; 
- - break; - } else { - childContext = { - global: context.global, - inside: context.inside, - lastWasSpacing: true, - hasLocals: false, - explicit: context.explicit, - }; - newNodes = node.map((childNode) => - transform(childNode, childContext) - ); - - node = node.clone(); - node.nodes = normalizeNodeArray(newNodes); - - if (childContext.hasLocals) { - context.hasLocals = true; - } - } - break; - - //:local .foo .bar - } else if (isScoped) { - if (context.inside) { - throw new Error( - `A ${node.value} is not allowed inside of a ${context.inside}(...)` - ); - } - - const addBackSpacing = !!node.spaces.before; - - context.ignoreNextSpacing = context.lastWasSpacing - ? node.value - : false; - - context.enforceNoSpacing = context.lastWasSpacing - ? false - : node.value; - - context.global = node.value === ":global"; - context.explicit = true; - - // because this node has spacing that is lost when we remove it - // we make up for it by adding an extra combinator in since adding - // spacing on the parent selector doesn't work - return addBackSpacing - ? selectorParser$1.combinator({ value: " " }) - : null; - } - break; - } - case "id": - case "class": { - if (!node.value) { - throw new Error("Invalid class or id selector syntax"); - } - - if (context.global) { - break; - } - - const isImportedValue = localAliasMap.has(node.value); - const isImportedWithExplicitScope = isImportedValue && context.explicit; - - if (!isImportedValue || isImportedWithExplicitScope) { - const innerNode = node.clone(); - innerNode.spaces = { before: "", after: "" }; - - node = selectorParser$1.pseudo({ - value: ":local", - nodes: [innerNode], - spaces: node.spaces, - }); - - context.hasLocals = true; - } - - break; - } - } - - context.lastWasSpacing = false; - context.ignoreNextSpacing = false; - context.enforceNoSpacing = false; - - return node; - }; - - const rootContext = { - global: mode === "global", - hasPureGlobals: false, - }; - - rootContext.selector = selectorParser$1((root) => { - transform(root, rootContext); - }).processSync(rule, { updateSelector: false, lossless: true }); - - return rootContext; -} - -function localizeDeclNode(node, context) { - switch (node.type) { - case "word": - if (context.localizeNextItem) { - if (!context.localAliasMap.has(node.value)) { - node.value = ":local(" + node.value + ")"; - context.localizeNextItem = false; - } - } - break; - - case "function": - if ( - context.options && - context.options.rewriteUrl && - node.value.toLowerCase() === "url" - ) { - node.nodes.map((nestedNode) => { - if (nestedNode.type !== "string" && nestedNode.type !== "word") { - return; - } - - let newUrl = context.options.rewriteUrl( - context.global, - nestedNode.value - ); - - switch (nestedNode.type) { - case "string": - if (nestedNode.quote === "'") { - newUrl = newUrl.replace(/(\\)/g, "\\$1").replace(/'/g, "\\'"); - } - - if (nestedNode.quote === '"') { - newUrl = newUrl.replace(/(\\)/g, "\\$1").replace(/"/g, '\\"'); - } - - break; - case "word": - newUrl = newUrl.replace(/("|'|\)|\\)/g, "\\$1"); - break; - } - - nestedNode.value = newUrl; - }); - } - break; - } - return node; -} - -function isWordAFunctionArgument(wordNode, functionNode) { - return functionNode - ? 
functionNode.nodes.some( - (functionNodeChild) => - functionNodeChild.sourceIndex === wordNode.sourceIndex - ) - : false; -} - -function localizeDeclarationValues(localize, declaration, context) { - const valueNodes = valueParser(declaration.value); - - valueNodes.walk((node, index, nodes) => { - const subContext = { - options: context.options, - global: context.global, - localizeNextItem: localize && !context.global, - localAliasMap: context.localAliasMap, - }; - nodes[index] = localizeDeclNode(node, subContext); - }); - - declaration.value = valueNodes.toString(); -} - -function localizeDeclaration(declaration, context) { - const isAnimation = /animation$/i.test(declaration.prop); - - if (isAnimation) { - const validIdent = /^-?[_a-z][_a-z0-9-]*$/i; - - /* - The spec defines some keywords that you can use to describe properties such as the timing - function. These are still valid animation names, so as long as there is a property that accepts - a keyword, it is given priority. Only when all the properties that can take a keyword are - exhausted can the animation name be set to the keyword. I.e. - - animation: infinite infinite; - - The animation will repeat an infinite number of times from the first argument, and will have an - animation name of infinite from the second. - */ - const animationKeywords = { - $alternate: 1, - "$alternate-reverse": 1, - $backwards: 1, - $both: 1, - $ease: 1, - "$ease-in": 1, - "$ease-in-out": 1, - "$ease-out": 1, - $forwards: 1, - $infinite: 1, - $linear: 1, - $none: Infinity, // No matter how many times you write none, it will never be an animation name - $normal: 1, - $paused: 1, - $reverse: 1, - $running: 1, - "$step-end": 1, - "$step-start": 1, - $initial: Infinity, - $inherit: Infinity, - $unset: Infinity, - }; - let parsedAnimationKeywords = {}; - let stepsFunctionNode = null; - const valueNodes = valueParser(declaration.value).walk((node) => { - /* If div-token appeared (represents as comma ','), a possibility of an animation-keywords should be reflesh. */ - if (node.type === "div") { - parsedAnimationKeywords = {}; - } - if (node.type === "function" && node.value.toLowerCase() === "steps") { - stepsFunctionNode = node; - } - const value = - node.type === "word" && - !isWordAFunctionArgument(node, stepsFunctionNode) - ? node.value.toLowerCase() - : null; - - let shouldParseAnimationName = false; - - if (value && validIdent.test(value)) { - if ("$" + value in animationKeywords) { - parsedAnimationKeywords["$" + value] = - "$" + value in parsedAnimationKeywords - ? 
parsedAnimationKeywords["$" + value] + 1 - : 0; - - shouldParseAnimationName = - parsedAnimationKeywords["$" + value] >= - animationKeywords["$" + value]; - } else { - shouldParseAnimationName = true; - } - } - - const subContext = { - options: context.options, - global: context.global, - localizeNextItem: shouldParseAnimationName && !context.global, - localAliasMap: context.localAliasMap, - }; - return localizeDeclNode(node, subContext); - }); - - declaration.value = valueNodes.toString(); - - return; - } - - const isAnimationName = /animation(-name)?$/i.test(declaration.prop); - - if (isAnimationName) { - return localizeDeclarationValues(true, declaration, context); - } - - const hasUrl = /url\(/i.test(declaration.value); - - if (hasUrl) { - return localizeDeclarationValues(false, declaration, context); - } -} - -src$2.exports = (options = {}) => { - if ( - options && - options.mode && - options.mode !== "global" && - options.mode !== "local" && - options.mode !== "pure" - ) { - throw new Error( - 'options.mode must be either "global", "local" or "pure" (default "local")' - ); - } - - const pureMode = options && options.mode === "pure"; - const globalMode = options && options.mode === "global"; - - return { - postcssPlugin: "postcss-modules-local-by-default", - prepare() { - const localAliasMap = new Map(); - - return { - Once(root) { - const { icssImports } = extractICSS(root, false); - - Object.keys(icssImports).forEach((key) => { - Object.keys(icssImports[key]).forEach((prop) => { - localAliasMap.set(prop, icssImports[key][prop]); - }); - }); - - root.walkAtRules((atRule) => { - if (/keyframes$/i.test(atRule.name)) { - const globalMatch = /^\s*:global\s*\((.+)\)\s*$/.exec( - atRule.params - ); - const localMatch = /^\s*:local\s*\((.+)\)\s*$/.exec( - atRule.params - ); - - let globalKeyframes = globalMode; - - if (globalMatch) { - if (pureMode) { - throw atRule.error( - "@keyframes :global(...) 
is not allowed in pure mode" - ); - } - atRule.params = globalMatch[1]; - globalKeyframes = true; - } else if (localMatch) { - atRule.params = localMatch[0]; - globalKeyframes = false; - } else if (!globalMode) { - if (atRule.params && !localAliasMap.has(atRule.params)) { - atRule.params = ":local(" + atRule.params + ")"; - } - } - - atRule.walkDecls((declaration) => { - localizeDeclaration(declaration, { - localAliasMap, - options: options, - global: globalKeyframes, - }); - }); - } else if (atRule.nodes) { - atRule.nodes.forEach((declaration) => { - if (declaration.type === "decl") { - localizeDeclaration(declaration, { - localAliasMap, - options: options, - global: globalMode, - }); - } - }); - } - }); - - root.walkRules((rule) => { - if ( - rule.parent && - rule.parent.type === "atrule" && - /keyframes$/i.test(rule.parent.name) - ) { - // ignore keyframe rules - return; - } - - const context = localizeNode(rule, options.mode, localAliasMap); - - context.options = options; - context.localAliasMap = localAliasMap; - - if (pureMode && context.hasPureGlobals) { - throw rule.error( - 'Selector "' + - rule.selector + - '" is not pure ' + - "(pure selectors must contain at least one local class or id)" - ); - } - - rule.selector = context.selector; - - // Less-syntax mixins parse as rules with no nodes - if (rule.nodes) { - rule.nodes.forEach((declaration) => - localizeDeclaration(declaration, context) - ); - } - }); - }, - }; - }, - }; -}; -src$2.exports.postcss = true; - -var srcExports$1 = src$2.exports; - -const selectorParser = distExports; - -const hasOwnProperty = Object.prototype.hasOwnProperty; - -function getSingleLocalNamesForComposes(root) { - return root.nodes.map((node) => { - if (node.type !== "selector" || node.nodes.length !== 1) { - throw new Error( - `composition is only allowed when selector is single :local class name not in "${root}"` - ); - } - - node = node.nodes[0]; - - if ( - node.type !== "pseudo" || - node.value !== ":local" || - node.nodes.length !== 1 - ) { - throw new Error( - 'composition is only allowed when selector is single :local class name not in "' + - root + - '", "' + - node + - '" is weird' - ); - } - - node = node.first; - - if (node.type !== "selector" || node.length !== 1) { - throw new Error( - 'composition is only allowed when selector is single :local class name not in "' + - root + - '", "' + - node + - '" is weird' - ); - } - - node = node.first; - - if (node.type !== "class") { - // 'id' is not possible, because you can't compose ids - throw new Error( - 'composition is only allowed when selector is single :local class name not in "' + - root + - '", "' + - node + - '" is weird' - ); - } - - return node.value; - }); -} - -const whitespace = "[\\x20\\t\\r\\n\\f]"; -const unescapeRegExp = new RegExp( - "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", - "ig" -); - -function unescape(str) { - return str.replace(unescapeRegExp, (_, escaped, escapedWhitespace) => { - const high = "0x" + escaped - 0x10000; - - // NaN means non-codepoint - // Workaround erroneous numeric interpretation of +"0x" - return high !== high || escapedWhitespace - ? escaped - : high < 0 - ? 
// BMP codepoint - String.fromCharCode(high + 0x10000) - : // Supplemental Plane codepoint (surrogate pair) - String.fromCharCode((high >> 10) | 0xd800, (high & 0x3ff) | 0xdc00); - }); -} - -const plugin = (options = {}) => { - const generateScopedName = - (options && options.generateScopedName) || plugin.generateScopedName; - const generateExportEntry = - (options && options.generateExportEntry) || plugin.generateExportEntry; - const exportGlobals = options && options.exportGlobals; - - return { - postcssPlugin: "postcss-modules-scope", - Once(root, { rule }) { - const exports = Object.create(null); - - function exportScopedName(name, rawName) { - const scopedName = generateScopedName( - rawName ? rawName : name, - root.source.input.from, - root.source.input.css - ); - const exportEntry = generateExportEntry( - rawName ? rawName : name, - scopedName, - root.source.input.from, - root.source.input.css - ); - const { key, value } = exportEntry; - - exports[key] = exports[key] || []; - - if (exports[key].indexOf(value) < 0) { - exports[key].push(value); - } - - return scopedName; - } - - function localizeNode(node) { - switch (node.type) { - case "selector": - node.nodes = node.map(localizeNode); - return node; - case "class": - return selectorParser.className({ - value: exportScopedName( - node.value, - node.raws && node.raws.value ? node.raws.value : null - ), - }); - case "id": { - return selectorParser.id({ - value: exportScopedName( - node.value, - node.raws && node.raws.value ? node.raws.value : null - ), - }); - } - } - - throw new Error( - `${node.type} ("${node}") is not allowed in a :local block` - ); - } - - function traverseNode(node) { - switch (node.type) { - case "pseudo": - if (node.value === ":local") { - if (node.nodes.length !== 1) { - throw new Error('Unexpected comma (",") in :local block'); - } - - const selector = localizeNode(node.first); - // move the spaces that were around the psuedo selector to the first - // non-container node - selector.first.spaces = node.spaces; - - const nextNode = node.next(); - - if ( - nextNode && - nextNode.type === "combinator" && - nextNode.value === " " && - /\\[A-F0-9]{1,6}$/.test(selector.last.value) - ) { - selector.last.spaces.after = " "; - } - - node.replaceWith(selector); - - return; - } - /* falls through */ - case "root": - case "selector": { - node.each(traverseNode); - break; - } - case "id": - case "class": - if (exportGlobals) { - exports[node.value] = [node.value]; - } - break; - } - return node; - } - - // Find any :import and remember imported names - const importedNames = {}; - - root.walkRules(/^:import\(.+\)$/, (rule) => { - rule.walkDecls((decl) => { - importedNames[decl.prop] = true; - }); - }); - - // Find any :local selectors - root.walkRules((rule) => { - let parsedSelector = selectorParser().astSync(rule); - - rule.selector = traverseNode(parsedSelector.clone()).toString(); - - rule.walkDecls(/composes|compose-with/i, (decl) => { - const localNames = getSingleLocalNamesForComposes(parsedSelector); - const classes = decl.value.split(/\s+/); - - classes.forEach((className) => { - const global = /^global\(([^)]+)\)$/.exec(className); - - if (global) { - localNames.forEach((exportedName) => { - exports[exportedName].push(global[1]); - }); - } else if (hasOwnProperty.call(importedNames, className)) { - localNames.forEach((exportedName) => { - exports[exportedName].push(className); - }); - } else if (hasOwnProperty.call(exports, className)) { - localNames.forEach((exportedName) => { - exports[className].forEach((item) 
=> { - exports[exportedName].push(item); - }); - }); - } else { - throw decl.error( - `referenced class name "${className}" in ${decl.prop} not found` - ); - } - }); - - decl.remove(); - }); - - // Find any :local values - rule.walkDecls((decl) => { - if (!/:local\s*\((.+?)\)/.test(decl.value)) { - return; - } - - let tokens = decl.value.split(/(,|'[^']*'|"[^"]*")/); - - tokens = tokens.map((token, idx) => { - if (idx === 0 || tokens[idx - 1] === ",") { - let result = token; - - const localMatch = /:local\s*\((.+?)\)/.exec(token); - - if (localMatch) { - const input = localMatch.input; - const matchPattern = localMatch[0]; - const matchVal = localMatch[1]; - const newVal = exportScopedName(matchVal); - - result = input.replace(matchPattern, newVal); - } else { - return token; - } - - return result; - } else { - return token; - } - }); - - decl.value = tokens.join(""); - }); - }); - - // Find any :local keyframes - root.walkAtRules(/keyframes$/i, (atRule) => { - const localMatch = /^\s*:local\s*\((.+?)\)\s*$/.exec(atRule.params); - - if (!localMatch) { - return; - } - - atRule.params = exportScopedName(localMatch[1]); - }); - - // If we found any :locals, insert an :export rule - const exportedNames = Object.keys(exports); - - if (exportedNames.length > 0) { - const exportRule = rule({ selector: ":export" }); - - exportedNames.forEach((exportedName) => - exportRule.append({ - prop: exportedName, - value: exports[exportedName].join(" "), - raws: { before: "\n " }, - }) - ); - - root.append(exportRule); - } - }, - }; -}; - -plugin.postcss = true; - -plugin.generateScopedName = function (name, path) { - const sanitisedPath = path - .replace(/\.[^./\\]+$/, "") - .replace(/[\W_]+/g, "_") - .replace(/^_|_$/g, ""); - - return `_${sanitisedPath}__${name}`.trim(); -}; - -plugin.generateExportEntry = function (name, scopedName) { - return { - key: unescape(name), - value: unescape(scopedName), - }; -}; - -var src$1 = plugin; - -function hash(str) { - var hash = 5381, - i = str.length; - - while(i) { - hash = (hash * 33) ^ str.charCodeAt(--i); - } - - /* JavaScript does bitwise operations (like XOR, above) on 32-bit signed - * integers. Since we want the results to be always positive, convert the - * signed int to an unsigned by doing an unsigned bitshift. 
*/ - return hash >>> 0; -} - -var stringHash = hash; - -var src = {exports: {}}; - -const ICSSUtils = src$4; - -const matchImports = /^(.+?|\([\s\S]+?\))\s+from\s+("[^"]*"|'[^']*'|[\w-]+)$/; -const matchValueDefinition = /(?:\s+|^)([\w-]+):?(.*?)$/; -const matchImport = /^([\w-]+)(?:\s+as\s+([\w-]+))?/; - -src.exports = (options) => { - let importIndex = 0; - const createImportedName = - (options && options.createImportedName) || - ((importName /*, path*/) => - `i__const_${importName.replace(/\W/g, "_")}_${importIndex++}`); - - return { - postcssPlugin: "postcss-modules-values", - prepare(result) { - const importAliases = []; - const definitions = {}; - - return { - Once(root, postcss) { - root.walkAtRules(/value/i, (atRule) => { - const matches = atRule.params.match(matchImports); - - if (matches) { - let [, /*match*/ aliases, path] = matches; - - // We can use constants for path names - if (definitions[path]) { - path = definitions[path]; - } - - const imports = aliases - .replace(/^\(\s*([\s\S]+)\s*\)$/, "$1") - .split(/\s*,\s*/) - .map((alias) => { - const tokens = matchImport.exec(alias); - - if (tokens) { - const [, /*match*/ theirName, myName = theirName] = tokens; - const importedName = createImportedName(myName); - definitions[myName] = importedName; - return { theirName, importedName }; - } else { - throw new Error(`@import statement "${alias}" is invalid!`); - } - }); - - importAliases.push({ path, imports }); - - atRule.remove(); - - return; - } - - if (atRule.params.indexOf("@value") !== -1) { - result.warn("Invalid value definition: " + atRule.params); - } - - let [, key, value] = `${atRule.params}${atRule.raws.between}`.match( - matchValueDefinition - ); - - const normalizedValue = value.replace(/\/\*((?!\*\/).*?)\*\//g, ""); - - if (normalizedValue.length === 0) { - result.warn("Invalid value definition: " + atRule.params); - atRule.remove(); - - return; - } - - let isOnlySpace = /^\s+$/.test(normalizedValue); - - if (!isOnlySpace) { - value = value.trim(); - } - - // Add to the definitions, knowing that values can refer to each other - definitions[key] = ICSSUtils.replaceValueSymbols( - value, - definitions - ); - - atRule.remove(); - }); - - /* If we have no definitions, don't continue */ - if (!Object.keys(definitions).length) { - return; - } - - /* Perform replacements */ - ICSSUtils.replaceSymbols(root, definitions); - - /* We want to export anything defined by now, but don't add it to the CSS yet or it well get picked up by the replacement stuff */ - const exportDeclarations = Object.keys(definitions).map((key) => - postcss.decl({ - value: definitions[key], - prop: key, - raws: { before: "\n " }, - }) - ); - - /* Add export rules if any */ - if (exportDeclarations.length > 0) { - const exportRule = postcss.rule({ - selector: ":export", - raws: { after: "\n" }, - }); - - exportRule.append(exportDeclarations); - - root.prepend(exportRule); - } - - /* Add import rules */ - importAliases.reverse().forEach(({ path, imports }) => { - const importRule = postcss.rule({ - selector: `:import(${path})`, - raws: { after: "\n" }, - }); - - imports.forEach(({ theirName, importedName }) => { - importRule.append({ - value: theirName, - prop: importedName, - raws: { before: "\n " }, - }); - }); - - root.prepend(importRule); - }); - }, - }; - }, - }; -}; - -src.exports.postcss = true; - -var srcExports = src.exports; - -Object.defineProperty(scoping, "__esModule", { - value: true -}); -scoping.behaviours = void 0; -scoping.getDefaultPlugins = getDefaultPlugins; 
-scoping.getDefaultScopeBehaviour = getDefaultScopeBehaviour; -scoping.getScopedNameGenerator = getScopedNameGenerator; - -var _postcssModulesExtractImports = _interopRequireDefault$1(srcExports$2); - -var _genericNames = _interopRequireDefault$1(genericNames); - -var _postcssModulesLocalByDefault = _interopRequireDefault$1(srcExports$1); - -var _postcssModulesScope = _interopRequireDefault$1(src$1); - -var _stringHash = _interopRequireDefault$1(stringHash); - -var _postcssModulesValues = _interopRequireDefault$1(srcExports); - -function _interopRequireDefault$1(obj) { return obj && obj.__esModule ? obj : { default: obj }; } - -const behaviours = { - LOCAL: "local", - GLOBAL: "global" -}; -scoping.behaviours = behaviours; - -function getDefaultPlugins({ - behaviour, - generateScopedName, - exportGlobals -}) { - const scope = (0, _postcssModulesScope.default)({ - generateScopedName, - exportGlobals - }); - const plugins = { - [behaviours.LOCAL]: [_postcssModulesValues.default, (0, _postcssModulesLocalByDefault.default)({ - mode: "local" - }), _postcssModulesExtractImports.default, scope], - [behaviours.GLOBAL]: [_postcssModulesValues.default, (0, _postcssModulesLocalByDefault.default)({ - mode: "global" - }), _postcssModulesExtractImports.default, scope] - }; - return plugins[behaviour]; -} - -function isValidBehaviour(behaviour) { - return Object.keys(behaviours).map(key => behaviours[key]).indexOf(behaviour) > -1; -} - -function getDefaultScopeBehaviour(scopeBehaviour) { - return scopeBehaviour && isValidBehaviour(scopeBehaviour) ? scopeBehaviour : behaviours.LOCAL; -} - -function generateScopedNameDefault(name, filename, css) { - const i = css.indexOf(`.${name}`); - const lineNumber = css.substr(0, i).split(/[\r\n]/).length; - const hash = (0, _stringHash.default)(css).toString(36).substr(0, 5); - return `_${name}_${hash}_${lineNumber}`; -} - -function getScopedNameGenerator(generateScopedName, hashPrefix) { - const scopedNameGenerator = generateScopedName || generateScopedNameDefault; - - if (typeof scopedNameGenerator === "function") { - return scopedNameGenerator; - } - - return (0, _genericNames.default)(scopedNameGenerator, { - context: process.cwd(), - hashPrefix: hashPrefix - }); -} - -Object.defineProperty(pluginFactory, "__esModule", { - value: true -}); -pluginFactory.makePlugin = makePlugin; - -var _postcss = _interopRequireDefault(postcss$1); - -var _unquote = _interopRequireDefault(unquote$1); - -var _Parser = _interopRequireDefault(Parser$1); - -var _saveJSON = _interopRequireDefault(saveJSON$1); - -var _localsConvention = localsConvention; - -var _FileSystemLoader = _interopRequireDefault(FileSystemLoader$1); - -var _scoping = scoping; - -function _interopRequireDefault(obj) { return obj && obj.__esModule ? 
obj : { default: obj }; } - -const PLUGIN_NAME = "postcss-modules"; - -function isGlobalModule(globalModules, inputFile) { - return globalModules.some(regex => inputFile.match(regex)); -} - -function getDefaultPluginsList(opts, inputFile) { - const globalModulesList = opts.globalModulePaths || null; - const exportGlobals = opts.exportGlobals || false; - const defaultBehaviour = (0, _scoping.getDefaultScopeBehaviour)(opts.scopeBehaviour); - const generateScopedName = (0, _scoping.getScopedNameGenerator)(opts.generateScopedName, opts.hashPrefix); - - if (globalModulesList && isGlobalModule(globalModulesList, inputFile)) { - return (0, _scoping.getDefaultPlugins)({ - behaviour: _scoping.behaviours.GLOBAL, - generateScopedName, - exportGlobals - }); - } - - return (0, _scoping.getDefaultPlugins)({ - behaviour: defaultBehaviour, - generateScopedName, - exportGlobals - }); -} - -function getLoader(opts, plugins) { - const root = typeof opts.root === "undefined" ? "/" : opts.root; - return typeof opts.Loader === "function" ? new opts.Loader(root, plugins, opts.resolve) : new _FileSystemLoader.default(root, plugins, opts.resolve); -} - -function isOurPlugin(plugin) { - return plugin.postcssPlugin === PLUGIN_NAME; -} - -function makePlugin(opts) { - return { - postcssPlugin: PLUGIN_NAME, - - async OnceExit(css, { - result - }) { - const getJSON = opts.getJSON || _saveJSON.default; - const inputFile = css.source.input.file; - const pluginList = getDefaultPluginsList(opts, inputFile); - const resultPluginIndex = result.processor.plugins.findIndex(plugin => isOurPlugin(plugin)); - - if (resultPluginIndex === -1) { - throw new Error("Plugin missing from options."); - } - - const earlierPlugins = result.processor.plugins.slice(0, resultPluginIndex); - const loaderPlugins = [...earlierPlugins, ...pluginList]; - const loader = getLoader(opts, loaderPlugins); - - const fetcher = async (file, relativeTo, depTrace) => { - const unquoteFile = (0, _unquote.default)(file); - return loader.fetch.call(loader, unquoteFile, relativeTo, depTrace); - }; - - const parser = new _Parser.default(fetcher); - await (0, _postcss.default)([...pluginList, parser.plugin()]).process(css, { - from: inputFile - }); - const out = loader.finalSource; - if (out) css.prepend(out); - - if (opts.localsConvention) { - const reducer = (0, _localsConvention.makeLocalsConventionReducer)(opts.localsConvention, inputFile); - parser.exportTokens = Object.entries(parser.exportTokens).reduce(reducer, {}); - } - - result.messages.push({ - type: "export", - plugin: "postcss-modules", - exportTokens: parser.exportTokens - }); // getJSON may return a promise - - return getJSON(css.source.input.file, parser.exportTokens, result.opts.to); - } - - }; -} - -var _fs = require$$0__default__default; - -var _fs2 = fs; - -var _pluginFactory = pluginFactory; - -(0, _fs2.setFileSystem)({ - readFile: _fs.readFile, - writeFile: _fs.writeFile -}); - -build.exports = (opts = {}) => (0, _pluginFactory.makePlugin)(opts); - -var postcss = build.exports.postcss = true; - -var buildExports = build.exports; -var index = /*@__PURE__*/getDefaultExportFromCjs(buildExports); - -var index$1 = /*#__PURE__*/_mergeNamespaces({ - __proto__: null, - default: index, - postcss: postcss -}, [buildExports]); - -export { index$1 as i }; diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Index-f0e43e7d.css b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Index-f0e43e7d.css deleted file 
mode 100644 index fb320f5e9afc1570c36e34f44865052ff83acf86..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Index-f0e43e7d.css +++ /dev/null @@ -1 +0,0 @@ -.base-image.svelte-m3v3vb.svelte-m3v3vb{display:block;width:100%;height:auto}.container.svelte-m3v3vb.svelte-m3v3vb{display:flex;position:relative;flex-direction:column;justify-content:center;align-items:center;width:var(--size-full);height:var(--size-full)}.image-container.svelte-m3v3vb.svelte-m3v3vb{position:relative;top:0;left:0;flex-grow:1;width:100%;overflow:hidden}.fit-height.svelte-m3v3vb.svelte-m3v3vb{position:absolute;top:0;left:0;width:100%;height:100%;object-fit:contain}.mask.svelte-m3v3vb.svelte-m3v3vb{opacity:.85;transition:all .2s ease-in-out}.image-container.svelte-m3v3vb:hover .mask.svelte-m3v3vb{opacity:.3}.mask.active.svelte-m3v3vb.svelte-m3v3vb{opacity:1}.mask.inactive.svelte-m3v3vb.svelte-m3v3vb{opacity:0}.legend.svelte-m3v3vb.svelte-m3v3vb{display:flex;flex-direction:row;flex-wrap:wrap;align-content:center;justify-content:center;align-items:center;gap:var(--spacing-sm);padding:var(--spacing-sm)}.legend-item.svelte-m3v3vb.svelte-m3v3vb{display:flex;flex-direction:row;align-items:center;cursor:pointer;border-radius:var(--radius-sm);padding:var(--spacing-sm)} diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Index-f0e43e7d.css b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Index-f0e43e7d.css deleted file mode 100644 index fb320f5e9afc1570c36e34f44865052ff83acf86..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Index-f0e43e7d.css +++ /dev/null @@ -1 +0,0 @@ -.base-image.svelte-m3v3vb.svelte-m3v3vb{display:block;width:100%;height:auto}.container.svelte-m3v3vb.svelte-m3v3vb{display:flex;position:relative;flex-direction:column;justify-content:center;align-items:center;width:var(--size-full);height:var(--size-full)}.image-container.svelte-m3v3vb.svelte-m3v3vb{position:relative;top:0;left:0;flex-grow:1;width:100%;overflow:hidden}.fit-height.svelte-m3v3vb.svelte-m3v3vb{position:absolute;top:0;left:0;width:100%;height:100%;object-fit:contain}.mask.svelte-m3v3vb.svelte-m3v3vb{opacity:.85;transition:all .2s ease-in-out}.image-container.svelte-m3v3vb:hover .mask.svelte-m3v3vb{opacity:.3}.mask.active.svelte-m3v3vb.svelte-m3v3vb{opacity:1}.mask.inactive.svelte-m3v3vb.svelte-m3v3vb{opacity:0}.legend.svelte-m3v3vb.svelte-m3v3vb{display:flex;flex-direction:row;flex-wrap:wrap;align-content:center;justify-content:center;align-items:center;gap:var(--spacing-sm);padding:var(--spacing-sm)}.legend-item.svelte-m3v3vb.svelte-m3v3vb{display:flex;flex-direction:row;align-items:center;cursor:pointer;border-radius:var(--radius-sm);padding:var(--spacing-sm)} diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/tests/test_agg_filter.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/tests/test_agg_filter.py deleted file mode 100644 index dc8cff6858ae52a85028b4f8b3266b4d1880f687..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/tests/test_agg_filter.py +++ /dev/null @@ -1,33 +0,0 @@ -import numpy as np - -import matplotlib.pyplot as plt -from matplotlib.testing.decorators import image_comparison - - 
-@image_comparison(baseline_images=['agg_filter_alpha'], - extensions=['png', 'pdf']) -def test_agg_filter_alpha(): - # Remove this line when this test image is regenerated. - plt.rcParams['pcolormesh.snap'] = False - - ax = plt.axes() - x, y = np.mgrid[0:7, 0:8] - data = x**2 - y**2 - mesh = ax.pcolormesh(data, cmap='Reds', zorder=5) - - def manual_alpha(im, dpi): - im[:, :, 3] *= 0.6 - print('CALLED') - return im, 0, 0 - - # Note: Doing alpha like this is not the same as setting alpha on - # the mesh itself. Currently meshes are drawn as independent patches, - # and we see fine borders around the blocks of color. See the SO - # question for an example: https://stackoverflow.com/q/20678817/ - mesh.set_agg_filter(manual_alpha) - - # Currently we must enable rasterization for this to have an effect in - # the PDF backend. - mesh.set_rasterized(True) - - ax.plot([0, 4, 7], [1, 3, 8]) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/mpl_toolkits/axisartist/grid_helper_curvelinear.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/mpl_toolkits/axisartist/grid_helper_curvelinear.py deleted file mode 100644 index ae17452b6c583abf84402a0dc34ee79299e5359c..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/mpl_toolkits/axisartist/grid_helper_curvelinear.py +++ /dev/null @@ -1,336 +0,0 @@ -""" -An experimental support for curvilinear grid. -""" - -import functools -from itertools import chain - -import numpy as np - -import matplotlib as mpl -from matplotlib.path import Path -from matplotlib.transforms import Affine2D, IdentityTransform -from .axislines import ( - _FixedAxisArtistHelperBase, _FloatingAxisArtistHelperBase, GridHelperBase) -from .axis_artist import AxisArtist -from .grid_finder import GridFinder - - -def _value_and_jacobian(func, xs, ys, xlims, ylims): - """ - Compute *func* and its derivatives along x and y at positions *xs*, *ys*, - while ensuring that finite difference calculations don't try to evaluate - values outside of *xlims*, *ylims*. - """ - eps = np.finfo(float).eps ** (1/2) # see e.g. scipy.optimize.approx_fprime - val = func(xs, ys) - # Take the finite difference step in the direction where the bound is the - # furthest; the step size is min of epsilon and distance to that bound. - xlo, xhi = sorted(xlims) - dxlo = xs - xlo - dxhi = xhi - xs - xeps = (np.take([-1, 1], dxhi >= dxlo) - * np.minimum(eps, np.maximum(dxlo, dxhi))) - val_dx = func(xs + xeps, ys) - ylo, yhi = sorted(ylims) - dylo = ys - ylo - dyhi = yhi - ys - yeps = (np.take([-1, 1], dyhi >= dylo) - * np.minimum(eps, np.maximum(dylo, dyhi))) - val_dy = func(xs, ys + yeps) - return (val, (val_dx - val) / xeps, (val_dy - val) / yeps) - - -class FixedAxisArtistHelper(_FixedAxisArtistHelperBase): - """ - Helper class for a fixed axis. - """ - - def __init__(self, grid_helper, side, nth_coord_ticks=None): - """ - nth_coord = along which coordinate value varies. 
- nth_coord = 0 -> x axis, nth_coord = 1 -> y axis - """ - - super().__init__(loc=side) - - self.grid_helper = grid_helper - if nth_coord_ticks is None: - nth_coord_ticks = self.nth_coord - self.nth_coord_ticks = nth_coord_ticks - - self.side = side - - def update_lim(self, axes): - self.grid_helper.update_lim(axes) - - def get_tick_transform(self, axes): - return axes.transData - - def get_tick_iterators(self, axes): - """tick_loc, tick_angle, tick_label""" - v1, v2 = axes.get_ylim() if self.nth_coord == 0 else axes.get_xlim() - if v1 > v2: # Inverted limits. - side = {"left": "right", "right": "left", - "top": "bottom", "bottom": "top"}[self.side] - else: - side = self.side - g = self.grid_helper - ti1 = g.get_tick_iterator(self.nth_coord_ticks, side) - ti2 = g.get_tick_iterator(1-self.nth_coord_ticks, side, minor=True) - return chain(ti1, ti2), iter([]) - - -class FloatingAxisArtistHelper(_FloatingAxisArtistHelperBase): - - def __init__(self, grid_helper, nth_coord, value, axis_direction=None): - """ - nth_coord = along which coordinate value varies. - nth_coord = 0 -> x axis, nth_coord = 1 -> y axis - """ - super().__init__(nth_coord, value) - self.value = value - self.grid_helper = grid_helper - self._extremes = -np.inf, np.inf - self._line_num_points = 100 # number of points to create a line - - def set_extremes(self, e1, e2): - if e1 is None: - e1 = -np.inf - if e2 is None: - e2 = np.inf - self._extremes = e1, e2 - - def update_lim(self, axes): - self.grid_helper.update_lim(axes) - - x1, x2 = axes.get_xlim() - y1, y2 = axes.get_ylim() - grid_finder = self.grid_helper.grid_finder - extremes = grid_finder.extreme_finder(grid_finder.inv_transform_xy, - x1, y1, x2, y2) - - lon_min, lon_max, lat_min, lat_max = extremes - e_min, e_max = self._extremes # ranges of other coordinates - if self.nth_coord == 0: - lat_min = max(e_min, lat_min) - lat_max = min(e_max, lat_max) - elif self.nth_coord == 1: - lon_min = max(e_min, lon_min) - lon_max = min(e_max, lon_max) - - lon_levs, lon_n, lon_factor = \ - grid_finder.grid_locator1(lon_min, lon_max) - lat_levs, lat_n, lat_factor = \ - grid_finder.grid_locator2(lat_min, lat_max) - - if self.nth_coord == 0: - xx0 = np.full(self._line_num_points, self.value) - yy0 = np.linspace(lat_min, lat_max, self._line_num_points) - xx, yy = grid_finder.transform_xy(xx0, yy0) - elif self.nth_coord == 1: - xx0 = np.linspace(lon_min, lon_max, self._line_num_points) - yy0 = np.full(self._line_num_points, self.value) - xx, yy = grid_finder.transform_xy(xx0, yy0) - - self._grid_info = { - "extremes": (lon_min, lon_max, lat_min, lat_max), - "lon_info": (lon_levs, lon_n, np.asarray(lon_factor)), - "lat_info": (lat_levs, lat_n, np.asarray(lat_factor)), - "lon_labels": grid_finder.tick_formatter1( - "bottom", lon_factor, lon_levs), - "lat_labels": grid_finder.tick_formatter2( - "bottom", lat_factor, lat_levs), - "line_xy": (xx, yy), - } - - def get_axislabel_transform(self, axes): - return Affine2D() # axes.transData - - def get_axislabel_pos_angle(self, axes): - def trf_xy(x, y): - trf = self.grid_helper.grid_finder.get_transform() + axes.transData - return trf.transform([x, y]).T - - xmin, xmax, ymin, ymax = self._grid_info["extremes"] - if self.nth_coord == 0: - xx0 = self.value - yy0 = (ymin + ymax) / 2 - elif self.nth_coord == 1: - xx0 = (xmin + xmax) / 2 - yy0 = self.value - xy1, dxy1_dx, dxy1_dy = _value_and_jacobian( - trf_xy, xx0, yy0, (xmin, xmax), (ymin, ymax)) - p = axes.transAxes.inverted().transform(xy1) - if 0 <= p[0] <= 1 and 0 <= p[1] <= 1: - d = [dxy1_dy, 
dxy1_dx][self.nth_coord] - return xy1, np.rad2deg(np.arctan2(*d[::-1])) - else: - return None, None - - def get_tick_transform(self, axes): - return IdentityTransform() # axes.transData - - def get_tick_iterators(self, axes): - """tick_loc, tick_angle, tick_label, (optionally) tick_label""" - - lat_levs, lat_n, lat_factor = self._grid_info["lat_info"] - yy0 = lat_levs / lat_factor - - lon_levs, lon_n, lon_factor = self._grid_info["lon_info"] - xx0 = lon_levs / lon_factor - - e0, e1 = self._extremes - - def trf_xy(x, y): - trf = self.grid_helper.grid_finder.get_transform() + axes.transData - return trf.transform(np.column_stack(np.broadcast_arrays(x, y))).T - - # find angles - if self.nth_coord == 0: - mask = (e0 <= yy0) & (yy0 <= e1) - (xx1, yy1), (dxx1, dyy1), (dxx2, dyy2) = _value_and_jacobian( - trf_xy, self.value, yy0[mask], (-np.inf, np.inf), (e0, e1)) - labels = self._grid_info["lat_labels"] - - elif self.nth_coord == 1: - mask = (e0 <= xx0) & (xx0 <= e1) - (xx1, yy1), (dxx2, dyy2), (dxx1, dyy1) = _value_and_jacobian( - trf_xy, xx0[mask], self.value, (-np.inf, np.inf), (e0, e1)) - labels = self._grid_info["lon_labels"] - - labels = [l for l, m in zip(labels, mask) if m] - - angle_normal = np.arctan2(dyy1, dxx1) - angle_tangent = np.arctan2(dyy2, dxx2) - mm = (dyy1 == 0) & (dxx1 == 0) # points with degenerate normal - angle_normal[mm] = angle_tangent[mm] + np.pi / 2 - - tick_to_axes = self.get_tick_transform(axes) - axes.transAxes - in_01 = functools.partial( - mpl.transforms._interval_contains_close, (0, 1)) - - def f1(): - for x, y, normal, tangent, lab \ - in zip(xx1, yy1, angle_normal, angle_tangent, labels): - c2 = tick_to_axes.transform((x, y)) - if in_01(c2[0]) and in_01(c2[1]): - yield [x, y], *np.rad2deg([normal, tangent]), lab - - return f1(), iter([]) - - def get_line_transform(self, axes): - return axes.transData - - def get_line(self, axes): - self.update_lim(axes) - x, y = self._grid_info["line_xy"] - return Path(np.column_stack([x, y])) - - -class GridHelperCurveLinear(GridHelperBase): - def __init__(self, aux_trans, - extreme_finder=None, - grid_locator1=None, - grid_locator2=None, - tick_formatter1=None, - tick_formatter2=None): - """ - Parameters - ---------- - aux_trans : `.Transform` or tuple[Callable, Callable] - The transform from curved coordinates to rectilinear coordinate: - either a `.Transform` instance (which provides also its inverse), - or a pair of callables ``(trans, inv_trans)`` that define the - transform and its inverse. The callables should have signature:: - - x_rect, y_rect = trans(x_curved, y_curved) - x_curved, y_curved = inv_trans(x_rect, y_rect) - - extreme_finder - - grid_locator1, grid_locator2 - Grid locators for each axis. - - tick_formatter1, tick_formatter2 - Tick formatters for each axis. - """ - super().__init__() - self._grid_info = None - self.grid_finder = GridFinder(aux_trans, - extreme_finder, - grid_locator1, - grid_locator2, - tick_formatter1, - tick_formatter2) - - def update_grid_finder(self, aux_trans=None, **kwargs): - if aux_trans is not None: - self.grid_finder.update_transform(aux_trans) - self.grid_finder.update(**kwargs) - self._old_limits = None # Force revalidation. 
- - def new_fixed_axis(self, loc, - nth_coord=None, - axis_direction=None, - offset=None, - axes=None): - if axes is None: - axes = self.axes - if axis_direction is None: - axis_direction = loc - helper = FixedAxisArtistHelper(self, loc, nth_coord_ticks=nth_coord) - axisline = AxisArtist(axes, helper, axis_direction=axis_direction) - # Why is clip not set on axisline, unlike in new_floating_axis or in - # the floating_axig.GridHelperCurveLinear subclass? - return axisline - - def new_floating_axis(self, nth_coord, - value, - axes=None, - axis_direction="bottom" - ): - if axes is None: - axes = self.axes - helper = FloatingAxisArtistHelper( - self, nth_coord, value, axis_direction) - axisline = AxisArtist(axes, helper) - axisline.line.set_clip_on(True) - axisline.line.set_clip_box(axisline.axes.bbox) - # axisline.major_ticklabels.set_visible(True) - # axisline.minor_ticklabels.set_visible(False) - return axisline - - def _update_grid(self, x1, y1, x2, y2): - self._grid_info = self.grid_finder.get_grid_info(x1, y1, x2, y2) - - def get_gridlines(self, which="major", axis="both"): - grid_lines = [] - if axis in ["both", "x"]: - for gl in self._grid_info["lon"]["lines"]: - grid_lines.extend(gl) - if axis in ["both", "y"]: - for gl in self._grid_info["lat"]["lines"]: - grid_lines.extend(gl) - return grid_lines - - def get_tick_iterator(self, nth_coord, axis_side, minor=False): - - # axisnr = dict(left=0, bottom=1, right=2, top=3)[axis_side] - angle_tangent = dict(left=90, right=90, bottom=0, top=0)[axis_side] - # angle = [0, 90, 180, 270][axisnr] - lon_or_lat = ["lon", "lat"][nth_coord] - if not minor: # major ticks - for (xy, a), l in zip( - self._grid_info[lon_or_lat]["tick_locs"][axis_side], - self._grid_info[lon_or_lat]["tick_labels"][axis_side]): - angle_normal = a - yield xy, angle_normal, angle_tangent, l - else: - for (xy, a), l in zip( - self._grid_info[lon_or_lat]["tick_locs"][axis_side], - self._grid_info[lon_or_lat]["tick_labels"][axis_side]): - angle_normal = a - yield xy, angle_normal, angle_tangent, "" - # for xy, a, l in self._grid_info[lon_or_lat]["ticks"][axis_side]: - # yield xy, a, "" diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/extension/array_with_attr/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/extension/array_with_attr/__init__.py deleted file mode 100644 index 49da6af024a31726743815ba1e36d66c03daafe5..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/extension/array_with_attr/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from pandas.tests.extension.array_with_attr.array import ( - FloatAttrArray, - FloatAttrDtype, -) - -__all__ = ["FloatAttrArray", "FloatAttrDtype"] diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pydantic/class_validators.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pydantic/class_validators.py deleted file mode 100644 index 2ff72ae53b6485ca5def8d0893debfd985f6f92e..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pydantic/class_validators.py +++ /dev/null @@ -1,4 +0,0 @@ -"""`class_validators` module is a backport module from V1.""" -from ._migration import getattr_migration - -__getattr__ = getattr_migration(__name__) diff --git a/spaces/projecte-aina/transcripcio-fonetica-catala/Dockerfile b/spaces/projecte-aina/transcripcio-fonetica-catala/Dockerfile deleted file mode 100644 index 
e88a9b276514be897af3a681dcb59020babd0e87..0000000000000000000000000000000000000000 --- a/spaces/projecte-aina/transcripcio-fonetica-catala/Dockerfile +++ /dev/null @@ -1,41 +0,0 @@ -FROM python:3.10.12-slim - -RUN apt-get update && apt-get install -y gnupg && \ - apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 A3A48C4A && \ - echo "deb http://ppa.launchpad.net/zeehio/festcat/ubuntu bionic main" >> /etc/apt/sources.list && \ - echo "deb-src http://ppa.launchpad.net/zeehio/festcat/ubuntu bionic main" >> /etc/apt/sources.list && \ - apt-get update && \ - apt-get -y install lame git g++ make autoconf automake libtool pkg-config gcc libsonic-dev ronn kramdown libpcaudio-dev libatlas-base-dev gfortran - -RUN git clone -b dev-ca https://github.com/projecte-aina/espeak-ng - -RUN cd espeak-ng && \ - ./autogen.sh && \ - ./configure --prefix=/usr && \ - make && \ - make install - -RUN useradd -m -u 1000 user - -USER user - -ENV HOME=/home/user \ - PATH=/home/user/.local/bin:$PATH - -# Set the working directory to the user's home directory -WORKDIR $HOME/app - -COPY --chown=user requirements.txt . - -RUN pip install -r requirements.txt - -COPY --chown=user app.py . - -RUN mkdir -p cache && chmod 777 cache - -ENV NUMBA_CACHE_DIR=/home/user/cache -ENV MPLCONFIGDIR=/home/user/cache - -EXPOSE 7860 - -CMD ["python3", "-u", "app.py"] diff --git a/spaces/pyInter/Liyuu_sovits4/inference/infer_tool.py b/spaces/pyInter/Liyuu_sovits4/inference/infer_tool.py deleted file mode 100644 index dbaff46f4f6eb792808e0a0cbb37fb86cb8372e2..0000000000000000000000000000000000000000 --- a/spaces/pyInter/Liyuu_sovits4/inference/infer_tool.py +++ /dev/null @@ -1,233 +0,0 @@ -import hashlib -import io -import json -import logging -import os -import time -from pathlib import Path -from inference import slicer - -import librosa -import numpy as np -# import onnxruntime -import parselmouth -import soundfile -import torch -import torchaudio - -import cluster -from hubert import hubert_model -import utils -from models import SynthesizerTrn - -logging.getLogger('matplotlib').setLevel(logging.WARNING) - - -def read_temp(file_name): - if not os.path.exists(file_name): - with open(file_name, "w") as f: - f.write(json.dumps({"info": "temp_dict"})) - return {} - else: - try: - with open(file_name, "r") as f: - data = f.read() - data_dict = json.loads(data) - if os.path.getsize(file_name) > 50 * 1024 * 1024: - f_name = file_name.replace("\\", "/").split("/")[-1] - print(f"clean {f_name}") - for wav_hash in list(data_dict.keys()): - if int(time.time()) - int(data_dict[wav_hash]["time"]) > 14 * 24 * 3600: - del data_dict[wav_hash] - except Exception as e: - print(e) - print(f"{file_name} error,auto rebuild file") - data_dict = {"info": "temp_dict"} - return data_dict - - -def write_temp(file_name, data): - with open(file_name, "w") as f: - f.write(json.dumps(data)) - - -def timeit(func): - def run(*args, **kwargs): - t = time.time() - res = func(*args, **kwargs) - print('executing \'%s\' costed %.3fs' % (func.__name__, time.time() - t)) - return res - - return run - - -def format_wav(audio_path): - if Path(audio_path).suffix == '.wav': - return - raw_audio, raw_sample_rate = librosa.load(audio_path, mono=True, sr=None) - soundfile.write(Path(audio_path).with_suffix(".wav"), raw_audio, raw_sample_rate) - - -def get_end_file(dir_path, end): - file_lists = [] - for root, dirs, files in os.walk(dir_path): - files = [f for f in files if f[0] != '.'] - dirs[:] = [d for d in dirs if d[0] != '.'] - for f_file in files: - if 
f_file.endswith(end): - file_lists.append(os.path.join(root, f_file).replace("\\", "/")) - return file_lists - - -def get_md5(content): - return hashlib.new("md5", content).hexdigest() - -def fill_a_to_b(a, b): - if len(a) < len(b): - for _ in range(0, len(b) - len(a)): - a.append(a[0]) - -def mkdir(paths: list): - for path in paths: - if not os.path.exists(path): - os.mkdir(path) - - -class Svc(object): - def __init__(self, net_g_path, config_path, - device=None, - cluster_model_path="logs/44k/kmeans_10000.pt"): - self.net_g_path = net_g_path - if device is None: - self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu") - else: - self.dev = torch.device(device) - self.net_g_ms = None - self.hps_ms = utils.get_hparams_from_file(config_path) - self.target_sample = self.hps_ms.data.sampling_rate - self.hop_size = self.hps_ms.data.hop_length - self.spk2id = self.hps_ms.spk - # 加载hubert - self.hubert_model = utils.get_hubert_model().to(self.dev) - self.load_model() - if os.path.exists(cluster_model_path): - self.cluster_model = cluster.get_cluster_model(cluster_model_path) - - def load_model(self): - # 获取模型配置 - self.net_g_ms = SynthesizerTrn( - self.hps_ms.data.filter_length // 2 + 1, - self.hps_ms.train.segment_size // self.hps_ms.data.hop_length, - **self.hps_ms.model) - _ = utils.load_checkpoint(self.net_g_path, self.net_g_ms, None) - if "half" in self.net_g_path and torch.cuda.is_available(): - _ = self.net_g_ms.half().eval().to(self.dev) - else: - _ = self.net_g_ms.eval().to(self.dev) - - - - def get_unit_f0(self, in_path, tran, cluster_infer_ratio, speaker): - - wav, sr = librosa.load(in_path, sr=self.target_sample) - - f0 = utils.compute_f0_parselmouth(wav, sampling_rate=self.target_sample, hop_length=self.hop_size) - f0, uv = utils.interpolate_f0(f0) - f0 = torch.FloatTensor(f0) - uv = torch.FloatTensor(uv) - f0 = f0 * 2 ** (tran / 12) - f0 = f0.unsqueeze(0).to(self.dev) - uv = uv.unsqueeze(0).to(self.dev) - - wav16k = librosa.resample(wav, orig_sr=self.target_sample, target_sr=16000) - wav16k = torch.from_numpy(wav16k).to(self.dev) - c = utils.get_hubert_content(self.hubert_model, wav_16k_tensor=wav16k) - c = utils.repeat_expand_2d(c.squeeze(0), f0.shape[1]) - - if cluster_infer_ratio !=0: - cluster_c = cluster.get_cluster_center_result(self.cluster_model, c.numpy().T, speaker).T - cluster_c = torch.FloatTensor(cluster_c) - c = cluster_infer_ratio * cluster_c + (1 - cluster_infer_ratio) * c - - c = c.unsqueeze(0) - return c, f0, uv - - def infer(self, speaker, tran, raw_path, - cluster_infer_ratio=0, - auto_predict_f0=False, - noice_scale=0.4): - speaker_id = self.spk2id[speaker] - sid = torch.LongTensor([int(speaker_id)]).to(self.dev).unsqueeze(0) - c, f0, uv = self.get_unit_f0(raw_path, tran, cluster_infer_ratio, speaker) - if "half" in self.net_g_path and torch.cuda.is_available(): - c = c.half() - with torch.no_grad(): - start = time.time() - audio = self.net_g_ms.infer(c, f0=f0, g=sid, uv=uv, predict_f0=auto_predict_f0, noice_scale=noice_scale)[0,0].data.float() - use_time = time.time() - start - print("vits use time:{}".format(use_time)) - return audio, audio.shape[-1] - - def slice_inference(self,raw_audio_path, spk, tran, slice_db,cluster_infer_ratio, auto_predict_f0,noice_scale, pad_seconds=0.5): - wav_path = raw_audio_path - chunks = slicer.cut(wav_path, db_thresh=slice_db) - audio_data, audio_sr = slicer.chunks2audio(wav_path, chunks) - - audio = [] - for (slice_tag, data) in audio_data: - print(f'#=====segment start, {round(len(data) / audio_sr, 
3)}s======') - # padd - pad_len = int(audio_sr * pad_seconds) - data = np.concatenate([np.zeros([pad_len]), data, np.zeros([pad_len])]) - length = int(np.ceil(len(data) / audio_sr * self.target_sample)) - raw_path = io.BytesIO() - soundfile.write(raw_path, data, audio_sr, format="wav") - raw_path.seek(0) - if slice_tag: - print('jump empty segment') - _audio = np.zeros(length) - else: - out_audio, out_sr = self.infer(spk, tran, raw_path, - cluster_infer_ratio=cluster_infer_ratio, - auto_predict_f0=auto_predict_f0, - noice_scale=noice_scale - ) - _audio = out_audio.cpu().numpy() - - pad_len = int(self.target_sample * pad_seconds) - _audio = _audio[pad_len:-pad_len] - audio.extend(list(_audio)) - return np.array(audio) - - -class RealTimeVC: - def __init__(self): - self.last_chunk = None - self.last_o = None - self.chunk_len = 16000 # 区块长度 - self.pre_len = 3840 # 交叉淡化长度,640的倍数 - - """输入输出都是1维numpy 音频波形数组""" - - def process(self, svc_model, speaker_id, f_pitch_change, input_wav_path): - import maad - audio, sr = torchaudio.load(input_wav_path) - audio = audio.cpu().numpy()[0] - temp_wav = io.BytesIO() - if self.last_chunk is None: - input_wav_path.seek(0) - audio, sr = svc_model.infer(speaker_id, f_pitch_change, input_wav_path) - audio = audio.cpu().numpy() - self.last_chunk = audio[-self.pre_len:] - self.last_o = audio - return audio[-self.chunk_len:] - else: - audio = np.concatenate([self.last_chunk, audio]) - soundfile.write(temp_wav, audio, sr, format="wav") - temp_wav.seek(0) - audio, sr = svc_model.infer(speaker_id, f_pitch_change, temp_wav) - audio = audio.cpu().numpy() - ret = maad.util.crossfade(self.last_o, audio, self.pre_len) - self.last_chunk = audio[-self.pre_len:] - self.last_o = audio - return ret[self.chunk_len:2 * self.chunk_len] diff --git a/spaces/pycui/RealChar/client/cli.py b/spaces/pycui/RealChar/client/cli.py deleted file mode 100644 index 317c6171d002005ec363baafe0dc7dcb4befa335..0000000000000000000000000000000000000000 --- a/spaces/pycui/RealChar/client/cli.py +++ /dev/null @@ -1,278 +0,0 @@ -import os -import queue -import asyncio -import concurrent.futures -import functools -import io -import sys -import random -from threading import Thread -import time - -from dotenv import load_dotenv - -import pyaudio -import speech_recognition as sr -import websockets -from aioconsole import ainput # for async input -from pydub import AudioSegment -from simpleaudio import WaveObject - -load_dotenv() - -executor = concurrent.futures.ThreadPoolExecutor(max_workers=3) -web2_initial_message = True - -CHUNK = 1024 -FORMAT = pyaudio.paInt16 -CHANNELS = 1 -RATE = 44100 - - -class AudioPlayer: - def __init__(self): - self.play_thread = None - self.stop_flag = False - self.queue = queue.Queue() - - def play_audio(self): - while not self.stop_flag or not self.queue.empty(): - try: - wav_data = self.queue.get_nowait() - except queue.Empty: - continue - - wave_obj = WaveObject.from_wave_file(wav_data) - play_obj = wave_obj.play() - - while play_obj.is_playing() and not self.stop_flag: - time.sleep(0.1) - - if self.stop_flag: - play_obj.stop() - - def start_playing(self, wav_data): - self.stop_flag = False - self.queue.put(wav_data) - - if self.play_thread is None or not self.play_thread.is_alive(): - self.play_thread = Thread(target=self.play_audio) - self.play_thread.start() - - def stop_playing(self): - if self.play_thread and self.play_thread.is_alive(): - self.stop_flag = True - self.play_thread.join() - self.play_thread = None - - def add_to_queue(self, wav_data): - 
self.queue.put(wav_data) - - -audio_player = AudioPlayer() - - -def get_input_device_id(): - p = pyaudio.PyAudio() - devices = [(i, p.get_device_info_by_index(i)['name']) - for i in range(p.get_device_count()) - if p.get_device_info_by_index(i).get('maxInputChannels')] - - print('Available devices:') - for id, name in devices: - print(f"Device id {id} - {name}") - - return int(input('Please select device id: ')) - - -async def handle_audio(websocket, device_id): - with sr.Microphone(device_index=device_id, sample_rate=RATE) as source: - recognizer = sr.Recognizer() - print('Source sample rate: ', source.SAMPLE_RATE) - print('Source width: ', source.SAMPLE_WIDTH) - print('Adjusting for ambient noise...Wait for 2 seconds') - recognizer.energy_threshold = 5000 - recognizer.dynamic_energy_ratio = 6 - recognizer.dynamic_energy_adjustment_damping = 0.85 - recognizer.non_speaking_duration = 0.5 - recognizer.pause_threshold = 0.8 - recognizer.phrase_threshold = 0.5 - recognizer.adjust_for_ambient_noise(source, duration=2) - listen_func = functools.partial( - recognizer.listen, source, phrase_time_limit=30) - - print('Okay, start talking!') - while True: - print('[*]', end="") # indicate that we are listening - audio = await asyncio.get_event_loop().run_in_executor(executor, listen_func) - await websocket.send(audio.frame_data) - print('[-]', end="") # indicate that we are done listening - await asyncio.sleep(2) - - -async def handle_text(websocket): - print('You: ', end="", flush=False) - while True: - message = await ainput() - await websocket.send(message) - -initial_message = True -async def receive_message(websocket, websocket2): - web1_init_message = await websocket.recv() - print('web1_init_message: ', web1_init_message) - - web2_init_message = await websocket2.recv() - print('web1_init_message: ', web2_init_message) - message_to_websocket1 = "Suppose I'm Steve Jobs now. What question do you have for me?" 
- await websocket.send(message_to_websocket1) - - web1_message = '' - while True: - try: - message = await websocket.recv() - print('here') - except websockets.exceptions.ConnectionClosedError as e: - print("Connection closed unexpectedly: ", e) - break - except Exception as e: - print("An error occurred: ", e) - break - - if isinstance(message, str): - if message == '[end]\n': - if not web1_message: - continue - # remove everything before '> ' in the message - message_to_websocket2 = web1_message[web1_message.find('> ') + 2:] - # print('message_to_websocket2: ', message_to_websocket2) - await websocket2.send(message_to_websocket2) - web2_message = '' - j = 0 - while True: - j += 1 - try: - message = await websocket2.recv() - except websockets.exceptions.ConnectionClosedError as e: - print("Connection closed unexpectedly: ", e) - break - except Exception as e: - print("An error occurred: ", e) - break - - if isinstance(message, str): - if message == '[end]\n': - # print('\nWebsocket2: ', end="", flush=False) - if not web2_message: - # print('skip') - continue - # remove everything before '> ' in the message - print(web2_message) - message_from_websocket2 = web2_message[web2_message.find('> ') + 2:] - await websocket.send(message_from_websocket2) - break - elif message.startswith('[+]'): - # stop playing audio - audio_player.stop_playing() - # indicate the transcription is done - # print(f"\nnWebsocket2: {message}", end="\n", flush=False) - elif message.startswith('[=]'): - # indicate the response is done - # print(f"nWebsocket2: {web2_message}", end="\n", flush=False) - pass - else: - # print('\nmessage++\n') - web2_message += message - elif isinstance(message, bytes): - global web2_initial_message - if web2_initial_message: - web2_initial_message = False - continue - audio_data = io.BytesIO(message) - audio = AudioSegment.from_mp3(audio_data) - wav_data = io.BytesIO() - audio.export(wav_data, format="wav") - # Start playing audio - audio_player.start_playing(wav_data) - - elif message.startswith('[+]'): - # stop playing audio - audio_player.stop_playing() - # indicate the transcription is done - print(f"\n{message}", end="\n", flush=False) - elif message.startswith('[=]'): - # indicate the response is done - print(f"{message}", end="\n", flush=False) - else: - web1_message += message - print(f"{message}", end="", flush=False) - elif isinstance(message, bytes): - audio_data = io.BytesIO(message) - audio = AudioSegment.from_mp3(audio_data) - wav_data = io.BytesIO() - audio.export(wav_data, format="wav") - # Start playing audio - audio_player.start_playing(wav_data) - else: - print("Unexpected message") - break - - -def select_model(): - llm_model_selection = input( - '1: gpt-3.5-turbo-16k \n' - '2: gpt-4 \n' - '3: claude-2 \n' - 'Select llm model:') - if llm_model_selection == '1': - llm_model = 'gpt-3.5-turbo-16k' - elif llm_model_selection == '2': - llm_model = 'gpt-4' - elif llm_model_selection == '3': - llm_model = 'claude-2' - return llm_model - - -async def start_client(client_id, url): - api_key = os.getenv('AUTH_API_KEY') - llm_model = select_model() - uri = f"ws://{url}/ws/{client_id}?api_key={api_key}&llm_model={llm_model}" - async with websockets.connect(uri) as websocket: - uri2 = f"ws://{url}/ws/9999999?api_key={api_key}&llm_model={llm_model}" - # send client platform info - async with websockets.connect(uri2) as websocket2: - await websocket.send('terminal') - await websocket2.send('terminal') - print(f"Client #{client_id} connected to websocket1") - print(f"Client 9999999 
connected to websocket2") - welcome_message = await websocket.recv() - welcome_message2 = await websocket2.recv() - print(f"{welcome_message}") - character = input('Select character: ') - await websocket.send(character) - await websocket2.send('6') - - mode = input('Select mode (1: audio, 2: text): ') - if mode.lower() == '1': - device_id = get_input_device_id() - send_task = asyncio.create_task(handle_audio(websocket, device_id)) - else: - send_task = asyncio.create_task(handle_text(websocket)) - - receive_task = asyncio.create_task(receive_message(websocket, websocket2)) - await asyncio.gather(receive_task, send_task) - - -async def main(url): - client_id = random.randint(0, 1000000) - task = asyncio.create_task(start_client(client_id, url)) - try: - await task - except KeyboardInterrupt: - task.cancel() - await asyncio.wait_for(task, timeout=None) - print("Client stopped by user") - - -if __name__ == "__main__": - url = sys.argv[1] if len(sys.argv) > 1 else 'localhost:8000' - asyncio.run(main(url)) diff --git a/spaces/qingxu98/gpt-academic/crazy_functions/test_project/cpp/cppipc/waiter.h b/spaces/qingxu98/gpt-academic/crazy_functions/test_project/cpp/cppipc/waiter.h deleted file mode 100644 index ee45fe3517be95ac1688a3e3540189edeb0d860c..0000000000000000000000000000000000000000 --- a/spaces/qingxu98/gpt-academic/crazy_functions/test_project/cpp/cppipc/waiter.h +++ /dev/null @@ -1,83 +0,0 @@ -#pragma once - -#include -#include -#include -#include - -#include "libipc/def.h" -#include "libipc/mutex.h" -#include "libipc/condition.h" -#include "libipc/platform/detail.h" - -namespace ipc { -namespace detail { - -class waiter { - ipc::sync::condition cond_; - ipc::sync::mutex lock_; - std::atomic quit_ {false}; - -public: - static void init(); - - waiter() = default; - waiter(char const *name) { - open(name); - } - - ~waiter() { - close(); - } - - bool valid() const noexcept { - return cond_.valid() && lock_.valid(); - } - - bool open(char const *name) noexcept { - quit_.store(false, std::memory_order_relaxed); - if (!cond_.open((std::string{"_waiter_cond_"} + name).c_str())) { - return false; - } - if (!lock_.open((std::string{"_waiter_lock_"} + name).c_str())) { - cond_.close(); - return false; - } - return valid(); - } - - void close() noexcept { - cond_.close(); - lock_.close(); - } - - template - bool wait_if(F &&pred, std::uint64_t tm = ipc::invalid_value) noexcept { - IPC_UNUSED_ std::lock_guard guard {lock_}; - while ([this, &pred] { - return !quit_.load(std::memory_order_relaxed) - && std::forward(pred)(); - }()) { - if (!cond_.wait(lock_, tm)) return false; - } - return true; - } - - bool notify() noexcept { - std::lock_guard{lock_}; // barrier - return cond_.notify(lock_); - } - - bool broadcast() noexcept { - std::lock_guard{lock_}; // barrier - return cond_.broadcast(lock_); - } - - bool quit_waiting() { - quit_.store(true, std::memory_order_release); - return broadcast(); - } -}; - -} // namespace detail -} // namespace ipc diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Download Software Xoul Net Proxy Checker NEW!.md b/spaces/quidiaMuxgu/Expedit-SAM/Download Software Xoul Net Proxy Checker NEW!.md deleted file mode 100644 index 52582f5c96bcd077b3d320a694fca9d84a64751d..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Download Software Xoul Net Proxy Checker NEW!.md +++ /dev/null @@ -1,7 +0,0 @@ - -

      13.07.2020 Download xoul net proxy checker Comments:... How to use it: install Proxy Finder Enterprise, then crack this software with the crack file that.... We offer a 10% referral program to our proxy customers. Check the details.... The visitor can check their IP (Internet Protocol) address and their browser... proxy servers. com, Free software downloads and software reviews - CNET Download. amtlib dll crack photoshop 2015

      -

      Download software xoul net proxy checker


      Download Filehttps://geags.com/2uCqEG



      -

      See the Android runtime's developer documentation for information on how to launch apps on Android and how to install apps on Android. You can also download the raw source code for the app.

      -

      Free Download Root Checker Apps on root checker - Proxy Checker - pro... To remove the found proxy servers and their IP/ Host / Port from the results, click on... CheckIfIPIsOn.exe. Proxy Checker Portable. Download Free.... Free Proxy Checker - PortScanner Portable. This is a handy proxy checker that easily scans the LAN.... Windows 7 (32- 64-bit) or newer Free ProxyCheckerPro v.1.3.4 Portable (Windows 8.1 (32- 64-bit) or... Comodo Firewall Checker is a small little tool that will allow you to check whether your... Free proxy server list (port 80 and 443) for testing and... Free proxy server checker.. HouseNet Proxy Test tool for Windows7 32-bit/64-bit/CE/2K/XP available in 2 different versions:... Free Netgear WiFi Checker is a small tool that can check your... Download h2h messenger-for-windows Free proxy checker..... Free proxy server list (port 80 and 443) for testing and... Free proxy server checker..

      899543212b
      -
      -
      \ No newline at end of file diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Gupi Gayen Bagha Bayen Full Movie 720p VERIFIED.md b/spaces/quidiaMuxgu/Expedit-SAM/Gupi Gayen Bagha Bayen Full Movie 720p VERIFIED.md deleted file mode 100644 index c5f127923085419a758332178010516ba7c25248..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Gupi Gayen Bagha Bayen Full Movie 720p VERIFIED.md +++ /dev/null @@ -1,12 +0,0 @@ -

      gupi gayen bagha bayen full movie 720p


      DOWNLOAD –––––>>> https://geags.com/2uCsEo



      - -17:22. This video has been removed from the Italia Collection by its owner. Published 2 years ago. We are the first. See more. best hostel in Nepal, near Kathmandu. The correct nomenclature is "choti naach," literally "small foot," or "little foot," and the term translates to "little cutie." Choti naach, the cutie or little cutie is the term used in the Royal Family.Rationale for the use of the terms "testis-obstructive azoospermia" and "azoospermia" in everyday medical practice. - -A small amount of spermatogonia is normal in the male. If the number of spermatogonia is insufficient to produce mature spermatozoa, then the condition is termed azoospermia. It has been proposed that azoospermia should be divided into two groups: obstructive azoospermia and non-obstructive azoospermia. This proposal has been based on the observation that the number of spermatogonia in a testis that is visually grossly normal can be lower than that found in a testis that is visually grossly abnormal. This proposal has been further modified to recognise that patients with obstructive azoospermia may have a normal number of spermatogonia or that patients with non-obstructive azoospermia may have a reduced number of spermatogonia. At present, it is not possible to define the threshold for the number of spermatogonia in the testis that should be considered normal. Such a definition will only be possible when accurate and reliable techniques for quantifying spermatogonia in the testis become available. However, despite these limitations, the use of the terms "testis-obstructive azoospermia" and "azoospermia" in everyday medical practice is clinically relevant.Q: - -First route is not visible in Ionic2 application - -I am new to Ionic 2. My application starts fine and home page is loaded but when I navigate to any other route or if I press a button on that page my app doesn't render the next page. I have used ng serve for the development. I am using a relative path for my routes. When I use absolute path instead of relative I don't have the same problem. But I have to use relative path for deployment because there 4fefd39f24
      -
      -
      -

      diff --git a/spaces/r3gm/SoniTranslate_translate_audio_of_a_video_content/lib/rmvpe.py b/spaces/r3gm/SoniTranslate_translate_audio_of_a_video_content/lib/rmvpe.py deleted file mode 100644 index 8d0d57297d4301e43a4fdcda216ae39c5e3b83b4..0000000000000000000000000000000000000000 --- a/spaces/r3gm/SoniTranslate_translate_audio_of_a_video_content/lib/rmvpe.py +++ /dev/null @@ -1,432 +0,0 @@ -import torch, numpy as np -import torch.nn as nn -import torch.nn.functional as F - - - -class BiGRU(nn.Module): - def __init__(self, input_features, hidden_features, num_layers): - super(BiGRU, self).__init__() - self.gru = nn.GRU( - input_features, - hidden_features, - num_layers=num_layers, - batch_first=True, - bidirectional=True, - ) - - def forward(self, x): - return self.gru(x)[0] - - -class ConvBlockRes(nn.Module): - def __init__(self, in_channels, out_channels, momentum=0.01): - super(ConvBlockRes, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=(3, 3), - stride=(1, 1), - padding=(1, 1), - bias=False, - ), - nn.BatchNorm2d(out_channels, momentum=momentum), - nn.ReLU(), - nn.Conv2d( - in_channels=out_channels, - out_channels=out_channels, - kernel_size=(3, 3), - stride=(1, 1), - padding=(1, 1), - bias=False, - ), - nn.BatchNorm2d(out_channels, momentum=momentum), - nn.ReLU(), - ) - if in_channels != out_channels: - self.shortcut = nn.Conv2d(in_channels, out_channels, (1, 1)) - self.is_shortcut = True - else: - self.is_shortcut = False - - def forward(self, x): - if self.is_shortcut: - return self.conv(x) + self.shortcut(x) - else: - return self.conv(x) + x - - -class Encoder(nn.Module): - def __init__( - self, - in_channels, - in_size, - n_encoders, - kernel_size, - n_blocks, - out_channels=16, - momentum=0.01, - ): - super(Encoder, self).__init__() - self.n_encoders = n_encoders - self.bn = nn.BatchNorm2d(in_channels, momentum=momentum) - self.layers = nn.ModuleList() - self.latent_channels = [] - for i in range(self.n_encoders): - self.layers.append( - ResEncoderBlock( - in_channels, out_channels, kernel_size, n_blocks, momentum=momentum - ) - ) - self.latent_channels.append([out_channels, in_size]) - in_channels = out_channels - out_channels *= 2 - in_size //= 2 - self.out_size = in_size - self.out_channel = out_channels - - def forward(self, x): - concat_tensors = [] - x = self.bn(x) - for i in range(self.n_encoders): - _, x = self.layers[i](x) - concat_tensors.append(_) - return x, concat_tensors - - -class ResEncoderBlock(nn.Module): - def __init__( - self, in_channels, out_channels, kernel_size, n_blocks=1, momentum=0.01 - ): - super(ResEncoderBlock, self).__init__() - self.n_blocks = n_blocks - self.conv = nn.ModuleList() - self.conv.append(ConvBlockRes(in_channels, out_channels, momentum)) - for i in range(n_blocks - 1): - self.conv.append(ConvBlockRes(out_channels, out_channels, momentum)) - self.kernel_size = kernel_size - if self.kernel_size is not None: - self.pool = nn.AvgPool2d(kernel_size=kernel_size) - - def forward(self, x): - for i in range(self.n_blocks): - x = self.conv[i](x) - if self.kernel_size is not None: - return x, self.pool(x) - else: - return x - - -class Intermediate(nn.Module): # - def __init__(self, in_channels, out_channels, n_inters, n_blocks, momentum=0.01): - super(Intermediate, self).__init__() - self.n_inters = n_inters - self.layers = nn.ModuleList() - self.layers.append( - ResEncoderBlock(in_channels, out_channels, None, n_blocks, momentum) - ) - for i in 
range(self.n_inters - 1): - self.layers.append( - ResEncoderBlock(out_channels, out_channels, None, n_blocks, momentum) - ) - - def forward(self, x): - for i in range(self.n_inters): - x = self.layers[i](x) - return x - - -class ResDecoderBlock(nn.Module): - def __init__(self, in_channels, out_channels, stride, n_blocks=1, momentum=0.01): - super(ResDecoderBlock, self).__init__() - out_padding = (0, 1) if stride == (1, 2) else (1, 1) - self.n_blocks = n_blocks - self.conv1 = nn.Sequential( - nn.ConvTranspose2d( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=(3, 3), - stride=stride, - padding=(1, 1), - output_padding=out_padding, - bias=False, - ), - nn.BatchNorm2d(out_channels, momentum=momentum), - nn.ReLU(), - ) - self.conv2 = nn.ModuleList() - self.conv2.append(ConvBlockRes(out_channels * 2, out_channels, momentum)) - for i in range(n_blocks - 1): - self.conv2.append(ConvBlockRes(out_channels, out_channels, momentum)) - - def forward(self, x, concat_tensor): - x = self.conv1(x) - x = torch.cat((x, concat_tensor), dim=1) - for i in range(self.n_blocks): - x = self.conv2[i](x) - return x - - -class Decoder(nn.Module): - def __init__(self, in_channels, n_decoders, stride, n_blocks, momentum=0.01): - super(Decoder, self).__init__() - self.layers = nn.ModuleList() - self.n_decoders = n_decoders - for i in range(self.n_decoders): - out_channels = in_channels // 2 - self.layers.append( - ResDecoderBlock(in_channels, out_channels, stride, n_blocks, momentum) - ) - in_channels = out_channels - - def forward(self, x, concat_tensors): - for i in range(self.n_decoders): - x = self.layers[i](x, concat_tensors[-1 - i]) - return x - - -class DeepUnet(nn.Module): - def __init__( - self, - kernel_size, - n_blocks, - en_de_layers=5, - inter_layers=4, - in_channels=1, - en_out_channels=16, - ): - super(DeepUnet, self).__init__() - self.encoder = Encoder( - in_channels, 128, en_de_layers, kernel_size, n_blocks, en_out_channels - ) - self.intermediate = Intermediate( - self.encoder.out_channel // 2, - self.encoder.out_channel, - inter_layers, - n_blocks, - ) - self.decoder = Decoder( - self.encoder.out_channel, en_de_layers, kernel_size, n_blocks - ) - - def forward(self, x): - x, concat_tensors = self.encoder(x) - x = self.intermediate(x) - x = self.decoder(x, concat_tensors) - return x - - -class E2E(nn.Module): - def __init__( - self, - n_blocks, - n_gru, - kernel_size, - en_de_layers=5, - inter_layers=4, - in_channels=1, - en_out_channels=16, - ): - super(E2E, self).__init__() - self.unet = DeepUnet( - kernel_size, - n_blocks, - en_de_layers, - inter_layers, - in_channels, - en_out_channels, - ) - self.cnn = nn.Conv2d(en_out_channels, 3, (3, 3), padding=(1, 1)) - if n_gru: - self.fc = nn.Sequential( - BiGRU(3 * 128, 256, n_gru), - nn.Linear(512, 360), - nn.Dropout(0.25), - nn.Sigmoid(), - ) - else: - self.fc = nn.Sequential( - nn.Linear(3 * nn.N_MELS, nn.N_CLASS), nn.Dropout(0.25), nn.Sigmoid() - ) - - def forward(self, mel): - mel = mel.transpose(-1, -2).unsqueeze(1) - x = self.cnn(self.unet(mel)).transpose(1, 2).flatten(-2) - x = self.fc(x) - return x - - -from librosa.filters import mel - - -class MelSpectrogram(torch.nn.Module): - def __init__( - self, - is_half, - n_mel_channels, - sampling_rate, - win_length, - hop_length, - n_fft=None, - mel_fmin=0, - mel_fmax=None, - clamp=1e-5, - ): - super().__init__() - n_fft = win_length if n_fft is None else n_fft - self.hann_window = {} - mel_basis = mel( - sr=sampling_rate, - n_fft=n_fft, - n_mels=n_mel_channels, - fmin=mel_fmin, - 
fmax=mel_fmax, - htk=True, - ) - mel_basis = torch.from_numpy(mel_basis).float() - self.register_buffer("mel_basis", mel_basis) - self.n_fft = win_length if n_fft is None else n_fft - self.hop_length = hop_length - self.win_length = win_length - self.sampling_rate = sampling_rate - self.n_mel_channels = n_mel_channels - self.clamp = clamp - self.is_half = is_half - - def forward(self, audio, keyshift=0, speed=1, center=True): - factor = 2 ** (keyshift / 12) - n_fft_new = int(np.round(self.n_fft * factor)) - win_length_new = int(np.round(self.win_length * factor)) - hop_length_new = int(np.round(self.hop_length * speed)) - keyshift_key = str(keyshift) + "_" + str(audio.device) - if keyshift_key not in self.hann_window: - self.hann_window[keyshift_key] = torch.hann_window(win_length_new).to( - audio.device - ) - fft = torch.stft( - audio, - n_fft=n_fft_new, - hop_length=hop_length_new, - win_length=win_length_new, - window=self.hann_window[keyshift_key], - center=center, - return_complex=True, - ) - magnitude = torch.sqrt(fft.real.pow(2) + fft.imag.pow(2)) - if keyshift != 0: - size = self.n_fft // 2 + 1 - resize = magnitude.size(1) - if resize < size: - magnitude = F.pad(magnitude, (0, 0, 0, size - resize)) - magnitude = magnitude[:, :size, :] * self.win_length / win_length_new - mel_output = torch.matmul(self.mel_basis, magnitude) - if self.is_half == True: - mel_output = mel_output.half() - log_mel_spec = torch.log(torch.clamp(mel_output, min=self.clamp)) - return log_mel_spec - - -class RMVPE: - def __init__(self, model_path, is_half, device=None): - self.resample_kernel = {} - model = E2E(4, 1, (2, 2)) - ckpt = torch.load(model_path, map_location="cpu") - model.load_state_dict(ckpt) - model.eval() - if is_half == True: - model = model.half() - self.model = model - self.resample_kernel = {} - self.is_half = is_half - if device is None: - device = "cuda" if torch.cuda.is_available() else "cpu" - self.device = device - self.mel_extractor = MelSpectrogram( - is_half, 128, 16000, 1024, 160, None, 30, 8000 - ).to(device) - self.model = self.model.to(device) - cents_mapping = 20 * np.arange(360) + 1997.3794084376191 - self.cents_mapping = np.pad(cents_mapping, (4, 4)) # 368 - - def mel2hidden(self, mel): - with torch.no_grad(): - n_frames = mel.shape[-1] - mel = F.pad( - mel, (0, 32 * ((n_frames - 1) // 32 + 1) - n_frames), mode="reflect" - ) - hidden = self.model(mel) - return hidden[:, :n_frames] - - def decode(self, hidden, thred=0.03): - cents_pred = self.to_local_average_cents(hidden, thred=thred) - f0 = 10 * (2 ** (cents_pred / 1200)) - f0[f0 == 10] = 0 - # f0 = np.array([10 * (2 ** (cent_pred / 1200)) if cent_pred else 0 for cent_pred in cents_pred]) - return f0 - - def infer_from_audio(self, audio, thred=0.03): - audio = torch.from_numpy(audio).float().to(self.device).unsqueeze(0) - # torch.cuda.synchronize() - # t0=ttime() - mel = self.mel_extractor(audio, center=True) - # torch.cuda.synchronize() - # t1=ttime() - hidden = self.mel2hidden(mel) - # torch.cuda.synchronize() - # t2=ttime() - hidden = hidden.squeeze(0).cpu().numpy() - if self.is_half == True: - hidden = hidden.astype("float32") - f0 = self.decode(hidden, thred=thred) - # torch.cuda.synchronize() - # t3=ttime() - # print("hmvpe:%s\t%s\t%s\t%s"%(t1-t0,t2-t1,t3-t2,t3-t0)) - return f0 - - def to_local_average_cents(self, salience, thred=0.05): - # t0 = ttime() - center = np.argmax(salience, axis=1) # frame length#index - salience = np.pad(salience, ((0, 0), (4, 4))) # frame length,368 - # t1 = ttime() - center += 4 - 
todo_salience = [] - todo_cents_mapping = [] - starts = center - 4 - ends = center + 5 - for idx in range(salience.shape[0]): - todo_salience.append(salience[:, starts[idx] : ends[idx]][idx]) - todo_cents_mapping.append(self.cents_mapping[starts[idx] : ends[idx]]) - # t2 = ttime() - todo_salience = np.array(todo_salience) # frame length,9 - todo_cents_mapping = np.array(todo_cents_mapping) # frame length,9 - product_sum = np.sum(todo_salience * todo_cents_mapping, 1) - weight_sum = np.sum(todo_salience, 1) # frame length - devided = product_sum / weight_sum # frame length - # t3 = ttime() - maxx = np.max(salience, axis=1) # frame length - devided[maxx <= thred] = 0 - # t4 = ttime() - # print("decode:%s\t%s\t%s\t%s" % (t1 - t0, t2 - t1, t3 - t2, t4 - t3)) - return devided - - -# if __name__ == '__main__': -# audio, sampling_rate = sf.read("Quotations~1.wav") ### edit -# if len(audio.shape) > 1: -# audio = librosa.to_mono(audio.transpose(1, 0)) -# audio_bak = audio.copy() -# if sampling_rate != 16000: -# audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000) -# model_path = "/bili-coeus/jupyter/jupyterhub-liujing04/vits_ch/test-RMVPE/weights/rmvpe_llc_half.pt" -# thred = 0.03 # 0.01 -# device = 'cuda' if torch.cuda.is_available() else 'cpu' -# rmvpe = RMVPE(model_path,is_half=False, device=device) -# t0=ttime() -# f0 = rmvpe.infer_from_audio(audio, thred=thred) -# f0 = rmvpe.infer_from_audio(audio, thred=thred) -# f0 = rmvpe.infer_from_audio(audio, thred=thred) -# f0 = rmvpe.infer_from_audio(audio, thred=thred) -# f0 = rmvpe.infer_from_audio(audio, thred=thred) -# t1=ttime() -# print(f0.shape,t1-t0) diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Call of Duty Modern Warfare 3 So Survival Mp Paris.ff Experience the Thrill of Nuclear Fusion in Paris.md b/spaces/raedeXanto/academic-chatgpt-beta/Call of Duty Modern Warfare 3 So Survival Mp Paris.ff Experience the Thrill of Nuclear Fusion in Paris.md deleted file mode 100644 index 9f60ef246966795f9567cbc3542e8d02ddea5cf6..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Call of Duty Modern Warfare 3 So Survival Mp Paris.ff Experience the Thrill of Nuclear Fusion in Paris.md +++ /dev/null @@ -1,169 +0,0 @@ -
      -

      Call of Duty Modern Warfare 3 So Survival Mp Paris.ff: Everything You Need to Know

      -

      If you are a fan of Call of Duty Modern Warfare 3, you might be interested in downloading the so survival mp paris.ff file. This file is part of the Collection 1 DLC pack that adds three multiplayer/survival mode maps, including the iconic Paris map. In this article, we will tell you everything you need to know about this file, including what it is, how to play it, and how to fix any errors that might occur.

      -

      What is Call of Duty Modern Warfare 3 So Survival Mp Paris.ff?

      -

      A brief introduction to the game and the DLC pack

      -

      Call of Duty Modern Warfare 3 is a 2011 first-person shooter video game developed by Infinity Ward and published by Activision. It is the third game in the Modern Warfare series and the eighth game in the Call of Duty franchise. The game features a single-player campaign that follows the events of Modern Warfare 2, as well as a multiplayer mode that offers various modes and maps for online battles.

      -

      call of duty modern warfare 3 so survival mp paris.ff


      Download File · https://tinourl.com/2uKZzu



      -

      Collection 1 DLC pack is one of the four downloadable content packs that were released for Modern Warfare 3. It was released on March 20, 2012 for Xbox 360 and on April 19, 2012 for PlayStation 3 and PC. The pack includes four new multiplayer maps (Liberation, Piazza, Overwatch, and Black Box) and two new special ops missions (Black Ice and Negotiator). It also adds three new survival mode maps (Liberation, Piazza, and Paris), which are based on the multiplayer maps but with different layouts and enemy waves.

      -

      The features and challenges of the map

      -

      Paris is one of the three survival mode maps that are included in the Collection 1 DLC pack. It is based on the multiplayer map of the same name, which is set in the streets and landmarks of Paris, France. The map features a large open area with several buildings, vehicles, bridges, fountains, and statues. The map also has a subway station that can be accessed from both sides.

      -

      The map offers a variety of challenges for survival mode players, such as:

      -
        -
      • The enemies are mostly infantry units, but they also include dogs, juggernauts, helicopters, suicide bombers, and riot shield squads.
      • -
      • The enemies can spawn from multiple directions and locations, making it hard to predict their movements.
      • -
      • The enemies can use cover and flanking routes to ambush or surround the players.
      • -
      • The map has limited cover and ammo crates, forcing the players to move around and scavenge for resources.
      • -
      • The map has several environmental hazards, such as exploding cars, gas tanks, and electrical wires.
      • -
      -

      How to download and install the file

      -

      To download and install the so survival mp paris.ff file, you need to have Call of Duty Modern Warfare 3 installed on your PC or console. You also need to purchase or download the Collection 1 DLC pack from your platform's store or website. Once you have done that, follow these steps:

      -
        -
      1. Launch Call of Duty Modern Warfare 3 from your PC or console.
      2. -
      3. Select Multiplayer from the main menu.
      4. -
      5. Select Store from the multiplayer menu.
      6. -
      7. Select Collection 1 from the store menu.
      8. -
      9. Select Download from the Collection 1 menu.
      10. -
      11. Wait for the download to complete.
      12. -
      13. Select Back from the store menu.
      14. -
      15. Select Special Ops from the multiplayer menu.
      16. -
      17. Select Survival Mode from the special ops menu.
      18. -
      19. Select Paris from the survival mode menu.
      20. -
      21. Enjoy playing Call of Duty Modern Warfare 3 So Survival Mp Paris.ff!
      22. -
      -

      How to play Call of Duty Modern Warfare 3 So Survival Mp Paris.ff?

      -

      The basic rules and objectives of survival mode

      -

      Survival mode is a cooperative mode that pits one or two players against endless waves of enemies. The mode can be played online or offline with a friend or an AI partner. The mode has four difficulty levels: Regular, Hardened, Veteran, and Insane. The mode also has leaderboards that rank players based on their scores.

      -

      How to play so survival mode in call of duty modern warfare 3
      -Call of duty modern warfare 3 so survival mp paris.ff download
      -Best weapons and strategies for so survival mp paris in call of duty modern warfare 3
      -Call of duty modern warfare 3 so survival mp paris.ff error fix
      -Call of duty modern warfare 3 so survival mp paris.ff missing or corrupted
      -Call of duty modern warfare 3 so survival mp paris.ff file location
      -Call of duty modern warfare 3 so survival mp paris.ff mod
      -Call of duty modern warfare 3 so survival mp paris.ff cheats and hacks
      -Call of duty modern warfare 3 so survival mp paris.ff gameplay video
      -Call of duty modern warfare 3 so survival mp paris.ff review and rating
      -Call of duty modern warfare 3 so survival mp paris.ff tips and tricks
      -Call of duty modern warfare 3 so survival mp paris.ff leaderboard and ranking
      -Call of duty modern warfare 3 so survival mp paris.ff online multiplayer
      -Call of duty modern warfare 3 so survival mp paris.ff system requirements
      -Call of duty modern warfare 3 so survival mp paris.ff steam key
      -Call of duty modern warfare 3 so survival mp paris.ff free download
      -Call of duty modern warfare 3 so survival mp paris.ff update and patch
      -Call of duty modern warfare 3 so survival mp paris.ff wiki and guide
      -Call of duty modern warfare 3 so survival mp paris.ff achievements and trophies
      -Call of duty modern warfare 3 so survival mp paris.ff map and layout
      -Call of duty modern warfare 3 so survival mp paris.ff weapons and perks
      -Call of duty modern warfare 3 so survival mp paris.ff enemies and waves
      -Call of duty modern warfare 3 so survival mp paris.ff glitches and bugs
      -Call of duty modern warfare 3 so survival mp paris.ff easter eggs and secrets
      -Call of duty modern warfare 3 so survival mp paris.ff soundtrack and music
      -How to unlock so survival mp paris in call of duty modern warfare 3
      -How to install call of duty modern warfare 3 so survival mp paris.ff
      -How to backup call of duty modern warfare 3 so survival mp paris.ff
      -How to delete call of duty modern warfare 3 so survival mp paris.ff
      -How to edit call of duty modern warfare 3 so survival mp paris.ff
      -How to fix call of duty modern warfare 3 so survival mp paris.ff lag and stuttering
      -How to get call of duty modern warfare 3 so survival mp paris.ff for free
      -How to host call of duty modern warfare 3 so survival mp paris.ff server
      -How to join call of duty modern warfare 3 so survival mp paris.ff server
      -How to make call of duty modern warfare 3 so survival mp paris.ff run faster
      -How to play call of duty modern warfare 3 so survival mp paris.ff offline
      -How to record call of duty modern warfare 3 so survival mp paris.ff gameplay
      -How to stream call of duty modern warfare 3 so survival mp paris.ff on twitch or youtube
      -How to transfer call of duty modern warfare 3 so survival mp paris.ff to another pc or laptop
      -How to update call of duty modern warfare 3 so survival mp paris.ff manually or automatically
      -Is call of duty modern warfare 3 so survival mp paris.ff worth playing in 2021?
      -What is the best difficulty level for call of duty modern warfare 3 so survival mp paris.ff?
      -What is the best loadout for call of duty modern warfare 3 so survival mp paris.ff?
      -What is the best team size for call of duty modern warfare 3 so survival mp paris.ff?
      -What is the best time to play call of duty modern warfare 3 so survival mp paris.ff?
      -What is the difference between call of duty modern warfare 3 so survival mode and spec ops mode?
      -What is the highest wave reached in call of duty modern warfare 3 so survival mp paris?
      -What is the plot and story behind call of duty modern warfare 3 so survival mode?
      -What are the pros and cons of playing call of duty modern warfare 3 so survival mode?

      -

      The basic rules and objectives of survival mode are:

      -
        -
      • The players start with a pistol, a knife, a frag grenade, a flashbang grenade, and $500.
      • -
      • The players can buy weapons, equipment, perks, air support, and self-revive kits from different stations around the map using money earned from killing enemies.
      • -
      • The players can also find weapons dropped by enemies or hidden in crates around the map.
      • -
      • The players have to survive as long as possible by killing enemies and avoiding damage.
      • -
      • The players have three lives each. If one player dies, they can be revived by their partner or by using a self-revive kit. If both players die or run out of lives, the game ends.
      • -
      • The enemies come in waves that increase in number, difficulty, and variety as the game progresses.
      • -
      • The enemies can drop dog tags that give extra money when collected by the players.
      • -
      • The players can earn bonuses for completing challenges such as killing enemies with headshots or melee attacks.
      • -
      -

      The best weapons, perks and strategies for the map

      -

      Paris is a large map that requires mobility, accuracy, and versatility. The best weapons for this map are:

      -
        -
      • Assault rifles: They offer good range, damage, accuracy, and magazine size. They are ideal for engaging enemies at medium to long distances. Some examples are M4A1, AK-47, ACR 6.8, SCAR-L, and G36C.
      • -
      • Submachine guns: They offer high fire rate, mobility, and hip fire accuracy. They are ideal for close quarters combat and rushing enemies. Some examples are MP5, UMP45, PP90M1, P90, and MP7.
      • -
      • Light machine guns: They offer high damage, penetration, and ammo capacity. They are ideal for suppressing enemies and holding choke points. Some examples are M60E4, PKP Pecheneg, MK46, L86 LSW, and MG36.
      • -
      • Shotguns: They offer high damage, spread, and one-shot kill potential. They are ideal for clearing rooms and taking out multiple enemies at once. Some examples are USAS 12, KSG 12, SPAS-12, AA-12, and Striker.
      • -
      • Semi-automatic rifles: They offer high damage, accuracy, and range. They are ideal for picking off enemies and sniping them from afar. Some examples are MK14, G3A4, M14 EBR, ACR 6.8 (single fire), and FAL OSW.
      • -
      -

      The best perks for this map are:

      -
        -

        -
          -
        • Sleight of Hand: It increases your reload speed, which is crucial for survival mode as you don't want to run out of ammo in the middle of a firefight.
        • -
        • Hardline: It reduces the number of kills required for your killstreaks, which can give you an edge over the enemies by providing air support or extra firepower.
        • -
        • Blast Shield: It reduces the damage taken from explosives and fire, which can save your life from the frequent grenade spam and environmental hazards on the map.
        • -
        • Stalker: It increases your movement speed while aiming down sights, which can help you strafe and dodge enemy fire while maintaining accuracy.
        • -
        • Marksman: It allows you to identify enemy targets at longer range, which can help you spot and eliminate snipers and RPG users before they can harm you.
        • -
        -

        The best strategies for this map are:

        -
          -
        • Use cover and elevation: The map has plenty of buildings and structures that can provide cover from enemy fire and give you a height advantage. Use them to your benefit and avoid staying in the open for too long.
        • -
        • Watch your flanks and back: The enemies can come from any direction and use different routes to reach you. Be aware of your surroundings and check your corners and blind spots frequently. Use claymores, proximity mines, or sentry guns to secure your flanks and back.
        • -
        • Stick together and communicate: Survival mode is a cooperative mode that requires teamwork and coordination. Stay close to your partner and communicate with them using voice chat or pings. Share ammo, equipment, and money with them and revive them when they are downed.
        • -
        • Prioritize your targets and use killstreaks wisely: Some enemies are more dangerous than others and should be taken out first. For example, juggernauts, helicopters, suicide bombers, and riot shield squads. Use your killstreaks to deal with them quickly and efficiently. Save your killstreaks for when you really need them and don't waste them on low-level enemies.
        • -
        • Be flexible and adaptable: Survival mode is unpredictable and dynamic. The enemies will change their tactics and weapons as the game progresses. You need to be flexible and adaptable to survive. Change your weapons, perks, and strategies according to the situation and don't be afraid to try new things.
        • -
        -

        How to fix Call of Duty Modern Warfare 3 So Survival Mp Paris.ff errors?

        -

        The common errors and their causes

        -

        Some players may encounter errors when trying to play Call of Duty Modern Warfare 3 So Survival Mp Paris.ff. These errors can prevent them from launching or playing the game properly. Some of the common errors and their causes are:

        -
          -
        • Error: Could not find zone so_survival_mp_paris.ff: This error means that the game cannot locate the file that contains the data for the Paris map. This could be due to a corrupted or missing file, a wrong file name, or a compatibility issue.
        • -
        • Error: Attempting to override asset maps/mp/mp_interchange.d3dbsp from zone so_survival_mp_paris with zone mp_interchange: This error means that the game is trying to load two different files that have conflicting data for the same map. This could be due to a modded or hacked file, a duplicate file, or a mismatched file version.
        • -
        • Error: FFMPP Error Solution: This error means that the game cannot load the FFMPP file format that is used for some of the survival mode maps. This could be due to a corrupted or missing file, a wrong file extension, or a compatibility issue.
        • -
        -

        The solutions and workarounds for each error

        -

        The solutions and workarounds for each error may vary depending on the cause and the platform of the game. However, some of the general steps that can help fix these errors are:

        -
          -
        • Verify the integrity of game files: This step can help detect and repair any corrupted or missing files that may cause errors. To do this on PC, go to Steam > Library > Right-click on Call of Duty Modern Warfare 3 > Properties > Local Files > Verify Integrity of Game Files. To do this on console, go to Settings > System > Storage > Select Call of Duty Modern Warfare 3 > Manage Game > Saved Data > Delete All.
        • -
        • Rename or delete the problematic file: This step can help resolve any errors caused by a wrong file name or a duplicate file. To do this on PC, go to C:\Program Files (x86)\Steam\steamapps\common\Call of Duty Modern Warfare 3\zone\english (or your language folder) > Find the problematic file (such as so_survival_mp_paris.ff) > Rename it (such as so_survival_mp_paris.bak) or delete it. To do this on console, go to Settings > System > Storage > Select Call of Duty Modern Warfare 3 > Manage Game > Saved Data > Find the problematic file (such as so_survival_mp_paris.ff) > Delete it. (On PC this rename can also be scripted; a short sketch follows this list.)
        • -
        • Reinstall or update the game or the DLC pack: This step can help resolve any errors caused by a corrupted or outdated file or a compatibility issue. To do this on PC, go to Steam > Library > Right-click on Call of Duty Modern Warfare 3 > Uninstall > Confirm > Download again. To do this on console, go to Settings > System > Storage > Select Call of Duty Modern Warfare 3 > Manage Game > Uninstall All > Confirm > Download again.
        • -
        • Disable any mods or hacks: This step can help resolve any errors caused by a modded or hacked file that may interfere with the game's normal functioning. To do this on PC, go to C:\Program Files (x86)\Steam\steamapps\common\Call of Duty Modern Warfare 3\zone\english (or your language folder) > Find any modded or hacked files (such as mp_interchange.d3dbsp) > Rename them (such as mp_interchange.bak) or delete them. To do this on console, go to Settings > System > Storage > Select Call of Duty Modern Warfare 3 > Manage Game > Saved Data > Find any modded or hacked files (such as mp_interchange.d3dbsp) > Delete them.
        • -
        -
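
-

For PC users who would rather script the rename step than click through Explorer, a minimal sketch is shown below. It assumes the default Steam path quoted in the list above; the function name back_up_and_rename and the ZONE_DIR and TARGET constants are illustrative, not part of the game or Steam, so adjust them to your own install before running it from an elevated prompt.

```python
from pathlib import Path

# Path and file name taken from the steps above; adjust them to your own install.
ZONE_DIR = Path(r"C:\Program Files (x86)\Steam\steamapps\common"
                r"\Call of Duty Modern Warfare 3\zone\english")
TARGET = "so_survival_mp_paris.ff"

def back_up_and_rename(zone_dir: Path = ZONE_DIR, name: str = TARGET) -> None:
    """Rename the problematic .ff file to .bak so the game stops loading it."""
    src = zone_dir / name
    if not src.exists():
        print(f"{src} not found - nothing to do.")
        return
    backup = src.with_suffix(".bak")  # e.g. so_survival_mp_paris.bak
    src.rename(backup)                # same effect as renaming the file in Explorer
    print(f"Renamed {src.name} to {backup.name}")

if __name__ == "__main__":
    back_up_and_rename()
```

Because the original is kept as a .bak file, the step is easy to undo: rename the backup to its original name if you want the map data back.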

        The sources and resources for further help

        -

        If none of these steps work for you, you may need to contact Activision support or visit their official forums for further help. You can also check out these sources and resources for more information and solutions:

        - -

        Conclusion

        -

        In conclusion, Call of Duty Modern Warfare 3 So Survival Mp Paris.ff is a fun and challenging survival mode map that tests your skills and teamwork against endless waves of enemies in the streets of Paris. You need to download and install the Collection 1 DLC pack to play it, as well as follow some tips and strategies to survive longer and get higher scores. If you encounter any errors while playing it, you can try some solutions and workarounds that we have provided in this article, or contact Activision support or visit their official forums for further help.

        -

        We hope you enjoyed this article and learned something new about Call of Duty Modern Warfare 3 So Survival Mp Paris.ff. If you did, please share it with your friends who might also be interested in this topic. Thank you for reading!

        -

        Frequently Asked Questions

        -
          -
        1. Q: How many players can play survival mode in Call of Duty Modern Warfare 3?

          -

          A: Survival mode in Call of Duty Modern Warfare 3 can be played by one or two players online or offline with a friend or an AI partner.

        2. -
          2. -
          3. Q: How many survival mode maps are there in Call of Duty Modern Warfare 3?

            -

            A: There are 16 survival mode maps in Call of Duty Modern Warfare 3, including the three maps that are part of the Collection 1 DLC pack (Liberation, Piazza, and Paris).

          4. -
          5. Q: What is the highest wave that can be reached in survival mode in Call of Duty Modern Warfare 3?

            -

            A: There is no limit to the number of waves that can be reached in survival mode in Call of Duty Modern Warfare 3. However, the enemies will become more numerous, difficult, and varied as the game progresses, making it harder to survive.

          6. -
          7. Q: What are the best killstreaks to use in survival mode in Call of Duty Modern Warfare 3?

            -

            A: The best killstreaks to use in survival mode in Call of Duty Modern Warfare 3 depend on your personal preference and playstyle. However, some of the most useful and popular killstreaks are Predator Missile, Sentry Gun, Delta Squad, Riot Shield Squad, and AC-130.

          8. -
          9. Q: How can I play survival mode with my friends in Call of Duty Modern Warfare 3?

            -

            A: To play survival mode with your friends in Call of Duty Modern Warfare 3, you need to have a PS Plus Membership on PS4, an Xbox Live Gold Membership on Xbox One, or a Steam account on PC. Then, you need to invite your friends to your party or join their party from the multiplayer menu. After that, you can select Special Ops > Survival Mode > Choose a map > Start Game.

          10. -
          -

          0a6ba089eb
          -
          -
          \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Donna Tartt The Secret History Mobi Torrent Download the Bestselling Novel Now.md b/spaces/raedeXanto/academic-chatgpt-beta/Donna Tartt The Secret History Mobi Torrent Download the Bestselling Novel Now.md deleted file mode 100644 index 9517960787c0f2fc776490e1565bb74951b6ba23..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Donna Tartt The Secret History Mobi Torrent Download the Bestselling Novel Now.md +++ /dev/null @@ -1,95 +0,0 @@ -
          -

          Donna Tartt The Secret History Mobi Torrent

          -

          If you are looking for a way to read one of the most captivating and influential novels of the 20th century online for free, you might be interested in finding a mobi torrent file of The Secret History by Donna Tartt. In this article, we will tell you everything you need to know about this book, how to download it as a mobi torrent, how to open and read it on your device, what are the benefits and risks of using mobi torrents, what are some alternatives to mobi torrents for reading this book, and what are some other books by Donna Tartt or similar to The Secret History that you might enjoy.

          -

          Donna Tartt The Secret History Mobi Torrent


          DOWNLOADhttps://tinourl.com/2uL4nc



          -

          What is The Secret History by Donna Tartt?

          -

          The Secret History is the debut novel by American writer Donna Tartt, published in 1992. It is a contemporary literary thriller that follows a group of six students at a prestigious New England college who study ancient Greek under a charismatic professor. They become fascinated by the idea of living according to the principles of Bacchus, the god of wine and ecstasy, and perform a secret ritual that leads them to commit a terrible crime. As they try to cover up their deed, they are haunted by guilt, paranoia, betrayal and violence.

          -

          The novel is written in the first-person perspective of Richard Papen, one of the students who joins the group after transferring from a boring California college. He narrates the events that happened several years ago, revealing his admiration, envy, love and hatred for his fellow students: Henry Winter, a brilliant and mysterious leader; Francis Abernathy, a wealthy and flamboyant aesthete; Charles Macaulay, a handsome and athletic twin; Camilla Macaulay, Charles' sister and object of Richard's obsession; and Edmund "Bunny" Corcoran, a cheerful but annoying troublemaker.

          -

          The novel explores themes such as morality, beauty, friendship, identity, class, secrets, lies, fate and free will. It is influenced by classical literature, especially Greek tragedy and philosophy. It also draws inspiration from real-life events such as the Leopold and Loeb case and the Bennington College murders. It is written in an elegant and captivating style that combines suspense, humor, irony and lyricism.

          -

          Why is The Secret History a popular and acclaimed book?

          -

          The Secret History was an instant bestseller when it was published in 1992. It received rave reviews from critics who praised its originality, sophistication, intelligence and depth. It won several awards such as the WH Smith Literary Award in 1993 and was nominated for others such as the Edgar Award for Best First Novel in 1993. It has been translated into more than 30 languages and has sold more than 5 million copies worldwide.

          -

          The Secret History has also influenced many writers and artists who have acknowledged its impact on their work. For example, Bret Easton Ellis dedicated his novel Lunar Park (2005) to Donna Tartt; Tana French named her debut novel In the Woods (2007) after a phrase from The Secret History; Marisha Pessl used quotes from The Secret History as epigraphs for her novel Special Topics in Calamity Physics (2006); David Fincher considered adapting The Secret History into a film but abandoned the project due to creative differences with Tartt.

          -

          The novel has also attracted a devoted readership. Many fans admire the characters, whether for Henry's brilliance and intellect or Camilla's beauty and grace. Some have even tried to emulate their lifestyle or visit their locations. There are numerous fan sites, blogs, forums, podcasts, videos and artworks dedicated to The Secret History. There are also online communities that discuss and analyze the novel in depth.

          -

          Donna Tartt The Secret History epub download
          -The Secret History by Donna Tartt pdf free
          -Donna Tartt The Secret History ebook torrent
          -The Secret History Donna Tartt mobi online
          -Donna Tartt The Secret History book review
          -The Secret History by Donna Tartt epub reader
          -Donna Tartt The Secret History pdf torrent
          -The Secret History Donna Tartt ebook free
          -Donna Tartt The Secret History mobi format
          -The Secret History by Donna Tartt pdf download
          -Donna Tartt The Secret History epub torrent
          -The Secret History Donna Tartt mobi free
          -Donna Tartt The Secret History book summary
          -The Secret History by Donna Tartt epub online
          -Donna Tartt The Secret History pdf online
          -The Secret History Donna Tartt ebook download
          -Donna Tartt The Secret History mobi torrent download
          -The Secret History by Donna Tartt pdf reader
          -Donna Tartt The Secret History epub free
          -The Secret History Donna Tartt mobi download
          -Donna Tartt The Secret History book analysis
          -The Secret History by Donna Tartt epub format
          -Donna Tartt The Secret History pdf format
          -The Secret History Donna Tartt ebook online
          -Donna Tartt The Secret History mobi reader
          -The Secret History by Donna Tartt pdf free download
          -Donna Tartt The Secret History epub download torrent
          -The Secret History Donna Tartt mobi online free
          -Donna Tartt The Secret History book quotes
          -The Secret History by Donna Tartt epub free download
          -Donna Tartt The Secret History pdf torrent download
          -The Secret History Donna Tartt ebook torrent download
          -Donna Tartt The Secret History mobi format download
          -The Secret History by Donna Tartt pdf online free
          -Donna Tartt The Secret History epub torrent download
          -The Secret History Donna Tartt mobi free download
          -Donna Tartt The Secret History book discussion questions
          -The Secret History by Donna Tartt epub online free
          -Donna Tartt The Secret History pdf online reader
          -The Secret History Donna Tartt ebook online free
          -Donna Tartt The Secret History mobi torrent magnet link
          -The Secret History by Donna Tartt pdf reader online
          -Donna Tartt The Secret History epub free online
          -The Secret History Donna Tartt mobi download torrent
          -Donna Tartt The Secret History book genre
          -The Secret History by Donna Tartt epub format download
          -Donna Tartt The Secret History pdf format download
          -The Secret History Donna Tartt ebook online reader
          -Donna Tartt The Secret History mobi reader online
          -The Secret History by Donna Tartt pdf free online reader

          -

          How to read The Secret History by Donna Tartt online for free?

          -

          If you want to read The Secret History by Donna Tartt online for free without buying or borrowing a copy, you might be tempted to look for a mobi torrent file of the novel. A mobi file is a format that is compatible with most e-readers and devices, such as Kindle, Nook, iPad, iPhone and Android. A torrent file is a small file that contains information about the location of a larger file that can be downloaded from a peer-to-peer network. In other words, a mobi torrent file of The Secret History is a file that allows you to download the novel as a mobi file from other users who have it on their computers.

          -

          To find and download a mobi torrent file of The Secret History, you will need two things: a torrent client and a torrent site. A torrent client is a software that enables you to connect to the peer-to-peer network and download files from other users. There are many free and reliable torrent clients available online, such as BitTorrent, uTorrent, qBittorrent and Vuze. A torrent site is a website that hosts and indexes torrent files for various types of content, such as movies, music, books and games. There are also many torrent sites that offer mobi files of books, such as RARBG, The Pirate Bay, 1337x and Torlock.

          -

          Once you have installed a torrent client on your device and found a torrent site that has a mobi torrent file of The Secret History, you can follow these steps to download the novel:

          -
            -
          1. Go to the torrent site and search for The Secret History by Donna Tartt MOBI.
          2. -
          3. Choose a torrent file that has a high number of seeders and leechers. Seeders are users who have the complete file and are sharing it with others. Leechers are users who are downloading the file but have not completed it yet. A high number of seeders and leechers indicates that the file is popular and reliable.
          4. -
          5. Click on the torrent file and download it to your device.
          6. -
          7. Open the torrent file with your torrent client and start downloading the mobi file of The Secret History.
          8. -
          9. Wait until the download is complete. The time it takes depends on the size of the file, the speed of your internet connection and the number of seeders and leechers.
          10. -
          11. Once the download is complete, you can open the mobi file with your e-reader app or device and start reading The Secret History.
          12. -
          -

          What are the benefits and risks of using mobi torrents?

          -

          Using mobi torrents to read books online for free has some benefits and risks that you should be aware of before deciding to do so. Here are some of them:

          -

          Benefits

          -
            -
          • You can access a large variety of books that might not be available or affordable in your region or country.
          • -
          • You can read books on any device that supports mobi files without having to buy or carry physical copies.
          • -
          • You can save money by not having to pay for books or subscription fees.
          • -
          • You can support authors who have given permission or encouragement for their books to be shared online for free.
          • -
          • You can discover new books and authors that you might not have heard of otherwise.
          • -
          -

          Risks

          -
            -
          • You might be breaking the law by downloading and reading books that are protected by copyright laws in your region or country.
          • -
          • You might be harming authors who rely on book sales for their income and recognition.
          • -
          • You might be exposing your device to viruses, malware or spyware that might be hidden in the mobi files or the torrent files.
          • -
          • You might be compromising your privacy and security by sharing your IP address and other information with other users on the peer-to-peer network.
          • -
          • You might be disappointed by the quality or accuracy of the mobi files, which might have errors, missing pages or incorrect information.
          • -
          -

          0a6ba089eb
          -
          -
          \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/ETKA V7.2.0.3285.FUL.(multilang) 64 Bitl.md b/spaces/raedeXanto/academic-chatgpt-beta/ETKA V7.2.0.3285.FUL.(multilang) 64 Bitl.md deleted file mode 100644 index 68dea79a7c6b9653d521f7bdda9670657735e508..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/ETKA V7.2.0.3285.FUL.(multilang) 64 Bitl.md +++ /dev/null @@ -1,163 +0,0 @@ - -

          ETKA V7.2.0.3285.FUL.(multilang) 64 Bitl: A Complete Guide for VAG Car Owners

          -

          If you own a Volkswagen, Audi, Seat, Skoda, or any other car from the VAG group, you might have heard of ETKA. ETKA is an electronic catalog that contains complete information about spare parts and accessories for VAG cars, including minibuses. It is a handy tool that helps you find the right parts for your car, compare prices and availability, and order them online or offline.

          -

          In this article, we will show you how to install and use ETKA V7.2.0.3285.FUL.(multilang) 64 Bitl on Windows 10 64-bit. This is a full version of ETKA that supports multiple languages and regions, and can be updated to version 8.1 or higher with some tweaks. We will also answer some frequently asked questions about ETKA and provide some tips and recommendations for using it.

          -

          ETKA V7.2.0.3285.FUL.(multilang) 64 Bitl


          Download Zip ····· https://tinourl.com/2uL4h0



          -

          How to install ETKA V7.2.0.3285.FUL.(multilang) 64 Bitl on Windows 10 64-bit

          -

          To install ETKA V7.2.0.3285.FUL.(multilang) 64 Bitl on Windows 10 64-bit, you will need to follow these steps:

          -

          Download the required files from the links provided

          -

          The first step is to download the following files from the links provided below:

          | File name | Description | Link |
          |---|---|---|
          | ETKA8_Germany_International_2018 | The main installation file for ETKA8 | https://mega.nz/folder/HhxQVawa#wKgPQuriA-4ysiXD1czPPw |
          | DLL-PATCH | A patch file to fix DLL errors during installation | https://mega.nz/folder/HhxQVawa#wKgPQuriA-4ysiXD1czPPw |
          | ApplicationX64 | An emulator file to run ETKA on 64-bit Windows | https://mega.nz/folder/HhxQVawa#wKgPQuriA-4ysiXD1czPPw |
          | ETKA_Updates | A folder containing update files for ETKA | https://mega.nz/folder/7w0y1Y6a#Wf4m9k8Z5lOZ2X0J3Lc7jg |
          | ETKA8.1_32-64Bit_FIX | A fix file to upgrade ETKA to version 8.1 and enable updates | https://mega.nz/folder/7w0y1Y6a#Wf4m9k8Z5lOZ2X0J3Lc7jg |
          -

          Make sure you have enough space on your hard drive to store these files, and use a reliable download manager to avoid interruptions or errors.
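
          If you want to verify this before starting the downloads, a quick disk-space check is easy to script. The sketch below assumes the downloads and the installation both go to drive C: and uses the 50 GB figure from the system requirements listed later in this article as a rough minimum.

```python
import shutil

# Rough pre-download check: is there enough free space on the target drive?
# 50 GB matches the minimum listed in the system requirements; adjust as needed.
REQUIRED_GB = 50
TARGET_DRIVE = "C:\\"

total, used, free = shutil.disk_usage(TARGET_DRIVE)
free_gb = free / 1024**3
if free_gb < REQUIRED_GB:
    print(f"Only {free_gb:.1f} GB free on {TARGET_DRIVE}; free up space before downloading.")
else:
    print(f"{free_gb:.1f} GB free on {TARGET_DRIVE} - enough space to proceed.")
```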

          -

          -

          Install ETKA8 and apply the DLL patch

          -

          The next step is to install ETKA8 and apply the DLL patch. To do this, follow these instructions:

          -
            -
          1. Extract the ETKA8_Germany_International_2018 file to a folder on your hard drive, such as C:\ETKA.
          2. -
          3. Run the setup.exe file as administrator and follow the installation wizard. Choose the language and region you prefer, and accept the license agreement.
          4. -
          5. When the installation is complete, do not launch ETKA yet. Instead, go to the folder where you extracted the DLL-PATCH archive and copy the two driver files it contains (hardlock.sys and multikey.sys) to the C:\Windows\System32 folder.
          6. -
          7. Restart your computer to apply the patch.
          8. -
          -

          Install the emulator and enter test mode

          -

          The third step is to install the emulator and enter test mode. This is necessary to bypass the hardware protection of ETKA and run it on 64-bit Windows. To do this, follow these steps (a scripted sketch of the test-mode command appears after the list):

          -
            -
          1. Extract the ApplicationX64 file to a folder on your hard drive, such as C:\Emulator.
          2. -
          3. Run the install.cmd file as administrator and wait for it to finish. You should see a message saying "Emulator installed successfully".
          4. -
          5. Open a command prompt as administrator and type the following command: bcdedit /set testsigning on. Press Enter and wait for a confirmation message.
          6. -
          7. Restart your computer to enter test mode. You should see a watermark on your desktop saying "Test Mode".
          8. -
          -
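
          If you prefer to run the test-mode command from a script rather than typing it by hand, here is a minimal sketch of step 5. It only wraps the same bcdedit call and assumes it is executed from an elevated (administrator) Python process; a restart is still required afterwards.

```python
import subprocess

# Step 5 above: enable Windows test-signing mode via bcdedit.
# Must run with administrator rights; reboot afterwards to enter test mode.
result = subprocess.run(["bcdedit", "/set", "testsigning", "on"],
                        capture_output=True, text=True)
if result.returncode == 0:
    print("Test signing enabled - restart the computer to enter test mode.")
else:
    print("bcdedit failed (not elevated?):", (result.stderr or result.stdout).strip())
```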

          Fix the desktop shortcut and launch ETKA8

          -

          The fourth step is to fix the desktop shortcut and launch ETKA8. To do this, follow these steps (a scripted alternative is sketched after the list):

          -
            -
          1. Right-click on the ETKA8 shortcut on your desktop and choose Properties.
          2. -
          3. In the Target field, change the value from C:\ETKA\PROG\ETK.EXE to C:\ETKA\PROG\ETK8.EXE.
          4. -
          5. In the Start in field, change the value from C:\ETKA\PROG\ to C:\ETKA\PROG\DATA\.
          6. -
          7. Click OK to save the changes.
          8. -
          9. Double-click on the ETKA8 shortcut to launch ETKA. You should see a splash screen with version 7.5.0.0.
          10. -
          -
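
          The same shortcut change can be scripted with the Windows Script Host COM object. This is only a sketch: it needs the third-party pywin32 package, and the shortcut name and location below are assumptions you may have to adjust.

```python
import os
import win32com.client  # third-party pywin32 package

# Point the ETKA8 desktop shortcut at ETK8.EXE and the DATA folder (steps 1-7 above).
# The shortcut path is an assumption - check the actual .lnk name on your desktop.
shortcut_path = os.path.join(os.path.expanduser("~"), "Desktop", "ETKA8.lnk")

shell = win32com.client.Dispatch("WScript.Shell")
shortcut = shell.CreateShortCut(shortcut_path)
shortcut.TargetPath = r"C:\ETKA\PROG\ETK8.EXE"
shortcut.WorkingDirectory = r"C:\ETKA\PROG\DATA"
shortcut.Save()
```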

          Download and apply the update files

          -

          The fifth step is to download and apply the update files. This will update ETKA from version 7.5.0.0 to version 7.2.0.3285.FUL.(multilang). To do this, follow these steps (a short copy script is sketched after the list):

          -
            -
          1. Go to the folder where you downloaded the ETKA_Updates folder, and extract it to a folder on your hard drive, such as C:\Updates.
          2. -
          3. Inside the Updates folder, you will see four subfolders: AU (for Audi), SE (for Seat), SK (for Skoda), and VW (for Volkswagen). Each subfolder contains update files for each brand.
          4. -
          5. To apply the updates, you need to copy each update file to its corresponding folder in C:\ETKA\UPDATE\. For example, if you want to update Audi, you need to copy all the files from C:\Updates\AU\ to C:\ETKA\UPDATE\AU\. Do this for each brand you want to update.
          6. -
          7. After copying all the update files, run the ETKA8 shortcut again and wait for the updates to be installed. You may see some error messages during the process, but you can ignore them. When the updates are done, you should see the version number change to 7.2.0.3285.FUL.(multilang).
          8. -
          -
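
          Copying the update files by hand is error-prone, so here is a short copy script for step 5. It assumes the paths used in the example above (C:\Updates as the source and C:\ETKA\UPDATE as the destination); change them if your folders differ.

```python
import shutil
from pathlib import Path

# Step 5 above: copy the extracted update files into ETKA's UPDATE folders.
SRC = Path(r"C:\Updates")
DST = Path(r"C:\ETKA\UPDATE")

for brand in ("AU", "SE", "SK", "VW"):
    src_dir, dst_dir = SRC / brand, DST / brand
    if not src_dir.is_dir():
        continue  # skip brands you did not download
    dst_dir.mkdir(parents=True, exist_ok=True)
    for item in src_dir.iterdir():
        if item.is_file():
            shutil.copy2(item, dst_dir / item.name)
            print(f"copied {item.name} -> {dst_dir}")
```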

          Upgrade to V8.1 and edit the registry to continue updates

          -

          The final step is to upgrade ETKA to version 8.1 and edit the registry to continue receiving updates. To do this, follow these steps (the registry change is also shown as a script after the list):

          -
            -
          1. Go to the folder where you downloaded the ETKA8.1_32-64Bit_FIX file, and extract it to a folder on your hard drive, such as C:\Fix.
          2. -
          3. Run the ETKA8.1_32-64Bit_FIX.exe file as administrator and follow the instructions. This will upgrade ETKA to version 8.1 and fix some bugs.
          4. -
          5. Open the Registry Editor by typing regedit in the search box and pressing Enter.
          6. -
          7. Navigate to the following key: HKEY_LOCAL_MACHINE\SOFTWARE\WOW6432Node\LexCom\ETKA8.
          8. -
          9. Double-click on the UpdateCheck value and change it from 0 to 1. This will enable ETKA to check for updates automatically.
          10. -
          11. Close the Registry Editor and restart your computer.
          12. -
          -
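
          The registry change in steps 5 to 9 can also be made from a script. This sketch assumes UpdateCheck is a DWORD value under the key named above and that Python runs with administrator rights, since writing under HKEY_LOCAL_MACHINE requires elevation.

```python
import winreg

# Steps 5-9 above: set UpdateCheck to 1 so ETKA checks for updates automatically.
KEY_PATH = r"SOFTWARE\WOW6432Node\LexCom\ETKA8"

with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, KEY_PATH, 0, winreg.KEY_SET_VALUE) as key:
    winreg.SetValueEx(key, "UpdateCheck", 0, winreg.REG_DWORD, 1)

print("UpdateCheck set to 1 - restart the computer for the change to take effect.")
```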

          Congratulations, you have successfully installed ETKA V7.2.0.3285.FUL.(multilang) 64 Bitl on Windows 10 64-bit!

          -

          How to use ETKA V7.2.0.3285.FUL.(multilang) 64 Bitl for VAG cars

          -

          Now that you have installed ETKA V7.2.0.3285.FUL.(multilang) 64 Bitl on your computer, you can use it to find and order spare parts and accessories for your VAG car. Here are some basic steps on how to use ETKA:

          -

          Select the brand and model of your car

          -

          The first thing you need to do is to select the brand and model of your car from the main menu of ETKA. You can choose from four brands: Audi, Volkswagen, Seat, or Skoda. After selecting a brand, you will see a list of models and years for that brand. You can either scroll through the list or type in the model name or code in the search box.

          -

          For example, if you have a 2019 Audi A4, you can type in A4 or B9 in the search box and select it from the list. You will then see a screen with information about your car, such as engine code, transmission code, body type, etc.

          -

          Browse the catalog by groups, subgroups, and illustrations

          -

          After selecting your car model, you can browse the catalog by groups, subgroups, and illustrations. Groups are categories of parts that belong to a certain system or function of your car, such as engine, transmission, suspension, brakes, etc. Subgroups are subcategories of parts within a group, such as cylinder head, clutch, shock absorber, brake disc, etc.

          -

          To browse by groups and subgroups, you can either use the buttons on the left side of the screen or click on the tabs at the top of the screen. For example, if you want to find parts for your engine, you can click on the Engine button or tab and then select a subgroup from the list.

          -

          Illustrations are drawings or diagrams that show the parts and their locations on your car. They are numbered and color-coded according to their group and subgroup. To browse by illustrations, you can either use the buttons on the right side of the screen or click on the thumbnails at the bottom of the screen.

          -

          For example, if you want to see an illustration of your engine block, you can click on the Engine Block button or thumbnail and then zoom in or out to see the details.

          -

          Search for parts by number, name, or VIN

          -

          If you already know the part number, name, or VIN of the part you are looking for, you can use the search function of ETKA to find it quickly. To search by part number, name, or VIN, you can either use the buttons on the top right corner of the screen or type in the search box at the bottom of the screen.

          -

          For example, if you want to find a water pump for your engine, you can type in water pump or 06L121111H (the part number) in the search box and press Enter. You will then see a list of results that match your query. You can click on any result to see more information about it, such as price, availability, compatibility, etc.

          -

          Compare prices and availability of parts

          -

          One of the benefits of using ETKA is that you can compare prices and availability of parts from different sources. ETKA has an online service that connects to various dealers and suppliers around the world and shows you their prices and stock levels for each part. You can also see the original price from the manufacturer and the recommended retail price for your region.

          -

          To compare prices and availability of parts, you need to have an active internet connection and a valid account for the online service. You can register for an account on the ETKA website or contact your local dealer for more information. Once you have an account, you can log in to the online service by clicking on the Online button on the top left corner of the screen.

          -

          After logging in, you can see a list of sources for each part on the right side of the screen. You can sort the list by price, availability, distance, or rating. You can also filter the list by country, region, currency, or delivery time. You can click on any source to see more details about it, such as contact information, shipping options, payment methods, etc.

          -

          Print or export the information you need

          -

          The last step is to print or export the information you need from ETKA. You can print or export any page or illustration from ETKA to a PDF file, an image file, a text file, or a spreadsheet file. You can also print or export a shopping list that contains all the parts you have selected for your car.

          -

          To print or export information from ETKA, you can either use the buttons on the top right corner of the screen or use the keyboard shortcuts Ctrl+P (for print) or Ctrl+E (for export). You will then see a dialog box where you can choose the format, destination, and options for your output. You can also preview your output before printing or exporting it.

          -

          Conclusion

          -

          In this article, we have shown you how to install and use ETKA V7.2.0.3285.FUL.(multilang) 64 Bitl on Windows 10 64-bit. ETKA is a powerful and useful tool that helps you find and order spare parts and accessories for your VAG car. It has many features and functions that make it easy and convenient to use.

          -

          Here are some tips and recommendations for using ETKA:

          -
            -
          • Always backup your data before installing or updating ETKA.
          • -
          • Always run ETKA as administrator and in test mode.
          • -
          • Always check for updates regularly and apply them as soon as possible.
          • -
          • Always use a reliable antivirus program and firewall to protect your computer from malware and hackers.
          • -
          • Always read the user manual and help files for more information and guidance on using ETKA.
          • -
          -

          We hope you have enjoyed this article and learned something new. If you have any feedback or questions about ETKA V7.2.0.3285.FUL.(multilang) 64 Bitl, please feel free to leave a comment below or contact us through our website. We would love to hear from you!

          -

          FAQs

          -

          What are the system requirements for ETKA V7.2.0.3285.FUL.(multilang) 64 Bitl?

          -

          The minimum system requirements for ETKA V7.2.0.3285.FUL.(multilang) 64 Bitl are:

          -
            -
          • Operating system: Windows 10 64-bit
          • -
          • Processor: Intel Core i3 or equivalent
          • -
          • Memory: 4 GB RAM
          • -
          • Hard disk: 50 GB free space
          • -
          • Display: 1024 x 768 or higher
          • -
          • Internet connection: Required for online service and updates
          • -
          -

          How can I update ETKA to the latest version?

          -

          To update ETKA to the latest version, you can either use the automatic update function or download and apply the update files manually. To use the automatic update function, you need to have an active internet connection and a valid account for the online service. You also need to edit the registry to enable ETKA to check for updates, as explained in the installation section of this article.

          -

          To use the automatic update function, follow these steps:

          -
            -
          1. Launch ETKA and log in to the online service.
          2. -
          3. Click on the Update button on the top left corner of the screen.
          4. -
          5. Wait for ETKA to check for updates and download them.
          6. -
          7. Restart ETKA to apply the updates.
          8. -
          -

          To download and apply the update files manually, follow these steps:

          -
            -
          1. Go to the following website and download the latest update files for each brand: (https://www.tekkno.de/etka/)
          2. -
          3. Copy each update file to its corresponding folder in C:\ETKA\UPDATE\. For example, if you want to update Audi, you need to copy all the files from C:\Downloads\AU\ to C:\ETKA\UPDATE\AU\. Do this for each brand you want to update.
          4. -
          5. Run the ETKA shortcut and wait for the updates to be installed. You may see some error messages during the process, but you can ignore them.
          6. -
          7. Restart ETKA to apply the updates.
          8. -
          -

          How can I access the online service for ETKA?

          -

          To access the online service for ETKA, you need to have an active internet connection and a valid account for the online service. You can register for an account on the ETKA website or contact your local dealer for more information. Once you have an account, you can log in to the online service by clicking on the Online button on the top left corner of the screen.

          -

          The online service for ETKA allows you to compare prices and availability of parts from different sources, order parts online or offline, access technical information and documents, and get support and assistance from experts. You can also customize your settings and preferences for the online service, such as language, region, currency, delivery time, etc.

          -

          How can I change the language or region settings for ETKA?

          -

          To change the language or region settings for ETKA, follow these steps:

          -
            -
          1. Launch ETKA and click on the Settings button on the top right corner of the screen.
          2. -
          3. Select the Language tab and choose the language you prefer from the drop-down menu. You can choose from English, German, French, Spanish, Italian, Portuguese, Turkish, Russian, Polish, Czech, Hungarian, Romanian, Bulgarian, Croatian, Slovenian, Slovakian, Serbian, Dutch, Swedish, Danish, Norwegian, Finnish, Greek, Chinese, Japanese, or Korean.
          4. -
          5. Select the Region tab and choose the region you prefer from the drop-down menu. You can choose from Europe, North America, South America, Asia, Africa, or Australia.
          6. -
          7. Click OK to save the changes and restart ETKA to apply them.
          8. -
          -

          How can I contact the support team for ETKA?

          -

          To contact the support team for ETKA, you can either use the Help button on the top right corner of the screen or visit the ETKA website. The Help button will open a window where you can access the user manual, the help files, the FAQ section, and the contact form. You can also send an email to etka-support@lex-com.net or call +49 89 189 3120 0.

          -

          The ETKA website is (https://www.etka.com/). There you can find more information about ETKA, such as features, benefits, news, events, testimonials, etc. You can also register for an account for the online service or contact your local dealer for more information.

          -

          -


          b2dd77e56b
          -
          -
          \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Essentials Of Management Koontz Pdf Free Downloadrar The Classic Textbook on Management by Harold Koontz and Heinz Weihrich.md b/spaces/raedeXanto/academic-chatgpt-beta/Essentials Of Management Koontz Pdf Free Downloadrar The Classic Textbook on Management by Harold Koontz and Heinz Weihrich.md deleted file mode 100644 index 86437c563cb23dec504bba9964fcb81b381a78b9..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Essentials Of Management Koontz Pdf Free Downloadrar The Classic Textbook on Management by Harold Koontz and Heinz Weihrich.md +++ /dev/null @@ -1,81 +0,0 @@ -
          -

          Essentials of Management Koontz PDF Free Downloadrar

          -

          If you are looking for a comprehensive and practical guide to management, you might want to check out Essentials of Management by Harold Koontz and Heinz Weihrich. This book is one of the most popular and widely used management textbooks in the world. It covers all the essential topics and concepts that managers need to know and apply in their work. In this article, we will tell you everything you need to know about this book, including how to get it for free in PDF format. Read on to find out more!

          -

          Essentials Of Management Koontz Pdf Free Downloadrar


          Download File 🗸🗸🗸 https://tinourl.com/2uL3Ev



          -

          What is Essentials of Management?

          -

          Essentials of Management is a book that provides an international, innovation, and leadership perspective on management. It was first published in 1976 by Harold Koontz and Cyril O'Donnell. Later editions were co-authored by Heinz Weihrich. The latest edition is the 11th one, which was published in 2019 by McGraw-Hill Education. The book has been translated into several languages and has sold millions of copies worldwide.

          -

          Harold Koontz was a professor of business management at the University of California, Los Angeles. He was also a consultant for many organizations and a prolific writer. Heinz Weihrich is a professor emeritus of global management and behavioral science at the University of San Francisco. He has also taught at various universities around the world and has authored or co-authored several books on management.

          -

          Why is Essentials of Management important?

          -

          Essentials of Management is important because it offers a comprehensive and practical approach to management that is relevant for managers in any organization or industry. It covers all the basic functions and skills that managers need to perform effectively, such as planning, organizing, staffing, leading, and controlling. It also incorporates the latest trends and developments in management theory and practice, such as globalization, innovation, diversity, ethics, social responsibility, sustainability, entrepreneurship, e-business, quality management, crisis management, etc.

          -

          The book is also important because it is designed for both students and practitioners of management. It is suitable for undergraduate and graduate courses in business administration, management science, organizational behavior, human resource management, etc. It is also useful for managers who want to update their knowledge and skills or prepare for professional exams or certifications. The book provides clear explanations, examples, cases, exercises, self-assessments, and review questions that help readers understand and apply the concepts.

          -

          How to get Essentials of Management for free?

          -

          If you want to get Essentials of Management for free in PDF format, you have several options. One option is to search online for websites that offer free downloads or online reading of books. However, you should be careful about the quality and legality of these sources, as some of them may contain viruses, malware, or pirated content. Another option is to use online platforms or apps that allow you to borrow or rent books from libraries or other users. Some examples of these platforms or apps are OverDrive, Libby, Scribd, etc. However, you may need to register or subscribe to access these services, and they may have limited availability or duration of books. A third option is to use academic databases or repositories that provide access to scholarly publications, including books, journals, articles, etc. Some examples of these databases or repositories are Google Scholar, ResearchGate, Academia.edu, etc. However, you may need to be affiliated with an academic institution or organization to access these resources, and they may not have the latest editions or versions of books.

          -

          What are the main topics covered in Essentials of Management?

          -

          Essentials of Management covers a wide range of topics related to management. The book is divided into six parts, each consisting of several chapters. The following is a summary of the main topics covered in each part:

          -

          The Nature and Essence of Management

          -

          This part introduces the definition, functions, skills, and roles of management. It also discusses the historical development, current challenges, and future trends of management theory and practice.

          -

          Planning

          -

          This part explains the process, types, tools, and techniques of planning. It also covers the topics of strategic management, environmental analysis, goal setting, policy formulation, decision making, and innovation.

          -

          Organizing

          -

          This part describes the principles, structure, design, and coordination of organizing. It also covers the topics of authority, responsibility, delegation, centralization, decentralization, departmentation, span of control, line-staff relationships, matrix organizations, teamwork, as some of them may contain viruses, malware, or pirated content. Another option is to use online platforms or apps that allow you to borrow or rent books from libraries or other users. Some examples of these platforms or apps are OverDrive, Libby, Scribd, etc. However, you may need to register or subscribe to access these services, and they may have limited availability or duration of books. A third option is to use academic databases or repositories that provide access to scholarly publications, including books, journals, articles, etc. Some examples of these databases or repositories are Google Scholar, ResearchGate, Academia.edu, etc. However, you may need to be affiliated with an academic institution or organization to access these resources, and they may not have the latest editions or versions of books.

        4. -
        5. What are the main topics covered in Essentials of Management? - Essentials of Management covers a wide range of topics related to management. The book is divided into six parts, each consisting of several chapters. The main topics covered in each part are: The Nature and Essence of Management; Planning; Organizing; Staffing; Leading; and Controlling.
        6. -
        7. What are the features and advantages of Essentials of Management? - Essentials of Management has many features and advantages that make it a valuable and reliable source of information and guidance for managers and students. Some of these features and advantages are: It is comprehensive and covers all the essential topics and concepts that managers need to know and apply in their work; It is practical and provides real-world examples, cases, exercises, self-assessments, and review questions that help readers understand and apply the concepts; It is international and incorporates the global perspective and challenges of management in different countries and cultures; It is innovative and reflects the latest trends and developments in management theory and practice; It is leadership-oriented and emphasizes the role and importance of leadership in management; It is user-friendly and has a clear and concise writing style, a logical and coherent structure, a colorful and attractive design, and a glossary and index for easy reference.
        8. -
        9. What are the challenges and limitations of Essentials of Management? - Essentials of Management also has some challenges and limitations that readers should be aware of. Some of these challenges and limitations are: It is expensive and may not be affordable for some readers or institutions; It is lengthy and may not be suitable for some courses or purposes that require a shorter or more focused text; It is complex and may not be easy for some readers or students to comprehend or retain all the information and concepts; It is dynamic and may not be up-to-date with some of the rapidly changing aspects or issues of management; It is subjective and may not reflect or agree with some of the opinions or preferences of different readers or experts; It is generic and may not address or cover some of the specific or unique needs or situations of different organizations or industries.
        10. -
        11. How to use Essentials of Management effectively? - If you want to use Essentials of Management effectively, you should follow some tips and suggestions that can help you get the most out of this book. Some of these tips and suggestions are: Read the book carefully and thoroughly. Pay attention to the definitions, explanations, examples, cases, exercises, self-assessments, and review questions. Try to understand and apply the concepts in your own context; Use the book as a reference. Consult the book whenever you need to refresh your memory or clarify your doubts about any topic or concept. Use the glossary and index to find the information you need quickly; Use the book as a guide. Follow the book's recommendations and advice on how to plan, organize, staff, lead, and control effectively. Use the book's tools and techniques to improve your performance and results; Use the book as a source. Cite the book when you write reports or papers on management topics. Acknowledge the book's authors and sources when you use their ideas or information; Use the book as a companion. Share the book with your colleagues or classmates. Discuss the book's concepts and cases with them. Learn from their perspectives and experiences.
        12. -
        -

        -

        Essentials Of Management Koontz Ebook Download
        -Free Pdf Of Essentials Of Management By Harold Koontz
        -Essentials Of Management Koontz 9th Edition Pdf
        -Download Essentials Of Management Koontz Pdf Without Rar
        -Essentials Of Management Koontz And Weihrich Pdf Free
        -How To Get Essentials Of Management Koontz Pdf For Free
        -Essentials Of Management Koontz Book Pdf Download
        -Essentials Of Management By Koontz And O'Donnell Pdf Free
        -Essentials Of Management Harold Koontz Heinz Weihrich Pdf
        -Essentials Of Management Koontz Pdf Free Online
        -Essentials Of Management By Harold Koontz 10th Edition Pdf
        -Download Essentials Of Management By Koontz And Weihrich Pdf
        -Essentials Of Management Koontz Rar File Download
        -Essentials Of Management By Harold Koontz Pdf Free Ebook
        -Essentials Of Management Koontz And O'Donnell Pdf Download
        -Essentials Of Management Harold Koontz Pdf Free Download
        -Essentials Of Management By Koontz And Weihrich 8th Edition Pdf
        -Essentials Of Management Koontz Pdf No Rar Password
        -Essentials Of Management By Harold Koontz And Cyril O'Donnell Pdf
        -Essentials Of Management By Harold Koontz Heinz Weihrich Mark Cannice Pdf
        -Essentials Of Management By Harold Koontz 9th Edition Pdf Free Download
        -Essentials Of Management By Harold Koontz And Heinz Weihrich 7th Edition Pdf
        -Essentials Of Management By Harold Koontz And Heinz Weihrich 6th Edition Pdf
        -Essentials Of Management By Harold Koontz And Heinz Weihrich 5th Edition Pdf
        -Essentials Of Management By Harold Koontz And Heinz Weihrich 4th Edition Pdf
        -Essentials Of Management By Harold Koontz And Heinz Weihrich 3rd Edition Pdf
        -Essentials Of Management By Harold Koontz And Heinz Weihrich 2nd Edition Pdf
        -Essentials Of Management By Harold Koontz And Heinz Weihrich 1st Edition Pdf
        -Essentials Of Management By Harold Koontz And Heinz Weihrich Latest Edition Pdf
        -Essentials Of Management By Harold Koontz And Heinz Weihrich Old Editions Pdf
        -Essentials Of Management By Harold Koontz And Heinz Weihrich Indian Edition Pdf
        -Essentials Of Management By Harold Koontz And Heinz Weihrich International Edition Pdf
        -Essentials Of Management By Harold Koontz And Heinz Weihrich Revised Edition Pdf
        -Essentials Of Management By Harold Koontz And Heinz Weihrich Updated Edition Pdf
        -Essentials Of Management By Harold Koontz And Heinz Weihrich New Edition Pdf
        -Summary Of Essentials Of Management By Harold Koontz And Heinz Weihrich Pdf
        -Review Of Essentials Of Management By Harold Koontz And Heinz Weihrich Pdf
        -Analysis Of Essentials Of Management By Harold Koontz And Heinz Weihrich Pdf
        -Notes On Essentials Of Management By Harold Koontz And Heinz Weihrich Pdf
        -Study Guide For Essentials Of Management By Harold Koontz And Heinz Weihrich Pdf
        -Solutions Manual For Essentials Of Management By Harold Koontz And Heinz Weihrich Pdf
        -Test Bank For Essentials Of Management By Harold Koontz And Heinz Weihrich Pdf
        -Case Studies For Essentials Of Management By Harold Koontz And Heinz Weihrich Pdf
        -Exercises For Essentials Of Management By Harold Koontz And Heinz Weihrich Pdf
        -Quizzes For Essentials Of Management By Harold Koontz And Heinz Weihrich Pdf
        -Assignments For Essentials Of Management By Harold Koontz And Heinz Weihrich Pdf
        -Projects For Essentials Of Management By Harold Koontz And Heinz Weihrich Pdf
        -Presentations For Essentials Of Management By Harold Koontz And Heinz Weihrich Pdf
        -Videos For Essentials Of Management By Harold Koontz And Heinz Weihrich Pdf

        0a6ba089eb
        -
        -
        \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Fallout 4 For Mac Os ((LINK)) Download.md b/spaces/raedeXanto/academic-chatgpt-beta/Fallout 4 For Mac Os ((LINK)) Download.md deleted file mode 100644 index 3b5abcc511621135fabd7ed60b7053372804648f..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Fallout 4 For Mac Os ((LINK)) Download.md +++ /dev/null @@ -1,37 +0,0 @@ - -

        How to Download and Play Fallout 4 on Mac OS

        -

        Fallout 4, released in 2015, is one of the most popular and acclaimed games of recent years, but unfortunately, it is not officially available for Mac OS. However, that does not mean that Mac users cannot enjoy this amazing open-world RPG. In this article, we will show you how to download and play Fallout 4 on Mac OS using two different methods: Boot Camp and Wine.

        -

        Fallout 4 For Mac Os Download


        Download File === https://tinourl.com/2uL5EW



        -

        Method 1: Boot Camp

        -

        Boot Camp is a built-in utility on Mac OS that allows you to install and run Windows on a separate partition of your hard drive. This way, you can launch Windows and run any game or application that is compatible with it, including Fallout 4. Here are the steps to use Boot Camp:

        -
          -
        1. Make sure you have enough free space on your Mac's hard drive. You will need at least 30 GB for Windows and another 30 GB for Fallout 4.
        2. -
        3. Download a Windows ISO file from Microsoft's website. You will need a valid product key to activate it later.
        4. -
        5. Open the Boot Camp Assistant app from the Utilities folder in your Applications folder.
        6. -
        7. Follow the on-screen instructions to create a Windows partition and install Windows on it.
        8. -
        9. Restart your Mac and hold the Option key to choose which operating system to boot into.
        10. -
        11. Select Windows and complete the setup process.
        12. -
        13. Download and install Steam on Windows.
        14. -
        15. Log in to your Steam account and purchase Fallout 4 if you haven't already.
        16. -
        17. Download and install Fallout 4 on Steam.
        18. -
        19. Launch Fallout 4 and enjoy!
        20. -
        -

        Method 2: Wine

        -

        Wine is software that allows you to run Windows applications on Mac OS without installing Windows. It is not an emulator, but rather a compatibility layer that translates Windows API calls into Mac OS ones. Wine can run many games and applications, but not all of them. Fallout 4 is one of the games that can be played with Wine, but you may encounter some performance issues or bugs. Here are the steps to use Wine (a small pre-flight check script follows the list):

        -
          -
        1. Download and install Wine from its official website.
        2. -
        3. Download and install Winetricks from its official website.
        4. -
        5. Open a Terminal window and type the following command: winetricks --gui
        6. -
        7. Select "Install an application" and then "Steam". Follow the instructions to install Steam on Wine.
        8. -
        9. Launch Steam from the Wine menu in your Applications folder.
        10. -
        11. Log in to your Steam account and purchase Fallout 4 if you haven't already.
        12. -
        13. Download and install Fallout 4 on Steam.
        14. -
        15. Launch Fallout 4 and enjoy!
        16. -
        -
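
        Before starting, it can help to confirm that Wine and Winetricks are actually installed and reachable. This is an optional pre-flight sketch; it assumes both tools are on your PATH (for example after installing them with Homebrew) and simply opens the Winetricks GUI from step 3.

```python
import shutil
import subprocess

# Pre-flight check for the Wine method: are wine and winetricks installed?
for tool in ("wine", "winetricks"):
    if shutil.which(tool) is None:
        raise SystemExit(f"{tool} not found on PATH - install it before continuing.")

# Step 3 above: open the Winetricks GUI.
subprocess.run(["winetricks", "--gui"], check=True)
```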

        Note: You may need to tweak some settings in Wine or Fallout 4 to improve the performance or fix some issues. You can check online forums or guides for more tips and tricks.

        -

        - -

        Conclusion

        -

        Fallout 4 is a fantastic game that offers a rich and immersive experience in a post-apocalyptic world. However, Mac users may face some challenges in playing it, as it is not officially supported by Mac OS. Fortunately, there are two methods that can help you download and play Fallout 4 on Mac OS: Boot Camp and Wine. Both methods have their pros and cons, and you can choose the one that suits you best. Boot Camp allows you to run Fallout 4 natively on Windows, but it requires more disk space and a reboot every time you want to switch between operating systems. Wine allows you to run Fallout 4 on Mac OS without installing Windows, but it may cause some performance issues or bugs. Either way, you can enjoy Fallout 4 on your Mac with some patience and effort.

        81aa517590
        -
        -
        \ No newline at end of file diff --git a/spaces/rahul2001/student_performance/ReadMe.md b/spaces/rahul2001/student_performance/ReadMe.md deleted file mode 100644 index a3964ddf7da82cf3864240792c579269b431d48c..0000000000000000000000000000000000000000 --- a/spaces/rahul2001/student_performance/ReadMe.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Student Performance -emoji: 📈 -colorFrom: green -colorTo: purple -sdk: gradio -sdk_version: 3.44.3 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/rahul2001/student_performance/setup.py b/spaces/rahul2001/student_performance/setup.py deleted file mode 100644 index fe38b623aa065ad9ef5967ae37ee0bd8d7f01e25..0000000000000000000000000000000000000000 --- a/spaces/rahul2001/student_performance/setup.py +++ /dev/null @@ -1,19 +0,0 @@ -from setuptools import find_packages,setup -HYPE_E_DOT = '-e .' -def get_req(path_file:str): - requirements = [] - with open(path_file) as file_obj: - requirements = file_obj.readlines() - requirements = [req.replace("\n","") for req in requirements] - - if HYPE_E_DOT in requirements: - requirements.remove(HYPE_E_DOT) - return requirements - -setup( - name = "ml_project", - author= "Rahul", - version= "0.0.1", - packages= find_packages(), - install_require = get_req("requirements.txt") - ) \ No newline at end of file diff --git a/spaces/rayan-saleh/whisper2notion/server/node_modules/@types/node/console.d.ts b/spaces/rayan-saleh/whisper2notion/server/node_modules/@types/node/console.d.ts deleted file mode 100644 index 16c9137adf20cd8eaad74c61819ff6e300205b7a..0000000000000000000000000000000000000000 --- a/spaces/rayan-saleh/whisper2notion/server/node_modules/@types/node/console.d.ts +++ /dev/null @@ -1,412 +0,0 @@ -/** - * The `console` module provides a simple debugging console that is similar to the - * JavaScript console mechanism provided by web browsers. - * - * The module exports two specific components: - * - * * A `Console` class with methods such as `console.log()`, `console.error()` and`console.warn()` that can be used to write to any Node.js stream. - * * A global `console` instance configured to write to `process.stdout` and `process.stderr`. The global `console` can be used without calling`require('console')`. - * - * _**Warning**_: The global console object's methods are neither consistently - * synchronous like the browser APIs they resemble, nor are they consistently - * asynchronous like all other Node.js streams. See the `note on process I/O` for - * more information. - * - * Example using the global `console`: - * - * ```js - * console.log('hello world'); - * // Prints: hello world, to stdout - * console.log('hello %s', 'world'); - * // Prints: hello world, to stdout - * console.error(new Error('Whoops, something bad happened')); - * // Prints error message and stack trace to stderr: - * // Error: Whoops, something bad happened - * // at [eval]:5:15 - * // at Script.runInThisContext (node:vm:132:18) - * // at Object.runInThisContext (node:vm:309:38) - * // at node:internal/process/execution:77:19 - * // at [eval]-wrapper:6:22 - * // at evalScript (node:internal/process/execution:76:60) - * // at node:internal/main/eval_string:23:3 - * - * const name = 'Will Robinson'; - * console.warn(`Danger ${name}! Danger!`); - * // Prints: Danger Will Robinson! 
Danger!, to stderr - * ``` - * - * Example using the `Console` class: - * - * ```js - * const out = getStreamSomehow(); - * const err = getStreamSomehow(); - * const myConsole = new console.Console(out, err); - * - * myConsole.log('hello world'); - * // Prints: hello world, to out - * myConsole.log('hello %s', 'world'); - * // Prints: hello world, to out - * myConsole.error(new Error('Whoops, something bad happened')); - * // Prints: [Error: Whoops, something bad happened], to err - * - * const name = 'Will Robinson'; - * myConsole.warn(`Danger ${name}! Danger!`); - * // Prints: Danger Will Robinson! Danger!, to err - * ``` - * @see [source](https://github.com/nodejs/node/blob/v18.0.0/lib/console.js) - */ -declare module 'console' { - import console = require('node:console'); - export = console; -} -declare module 'node:console' { - import { InspectOptions } from 'node:util'; - global { - // This needs to be global to avoid TS2403 in case lib.dom.d.ts is present in the same build - interface Console { - Console: console.ConsoleConstructor; - /** - * `console.assert()` writes a message if `value` is [falsy](https://developer.mozilla.org/en-US/docs/Glossary/Falsy) or omitted. It only - * writes a message and does not otherwise affect execution. The output always - * starts with `"Assertion failed"`. If provided, `message` is formatted using `util.format()`. - * - * If `value` is [truthy](https://developer.mozilla.org/en-US/docs/Glossary/Truthy), nothing happens. - * - * ```js - * console.assert(true, 'does nothing'); - * - * console.assert(false, 'Whoops %s work', 'didn\'t'); - * // Assertion failed: Whoops didn't work - * - * console.assert(); - * // Assertion failed - * ``` - * @since v0.1.101 - * @param value The value tested for being truthy. - * @param message All arguments besides `value` are used as error message. - */ - assert(value: any, message?: string, ...optionalParams: any[]): void; - /** - * When `stdout` is a TTY, calling `console.clear()` will attempt to clear the - * TTY. When `stdout` is not a TTY, this method does nothing. - * - * The specific operation of `console.clear()` can vary across operating systems - * and terminal types. For most Linux operating systems, `console.clear()`operates similarly to the `clear` shell command. On Windows, `console.clear()`will clear only the output in the - * current terminal viewport for the Node.js - * binary. - * @since v8.3.0 - */ - clear(): void; - /** - * Maintains an internal counter specific to `label` and outputs to `stdout` the - * number of times `console.count()` has been called with the given `label`. - * - * ```js - * > console.count() - * default: 1 - * undefined - * > console.count('default') - * default: 2 - * undefined - * > console.count('abc') - * abc: 1 - * undefined - * > console.count('xyz') - * xyz: 1 - * undefined - * > console.count('abc') - * abc: 2 - * undefined - * > console.count() - * default: 3 - * undefined - * > - * ``` - * @since v8.3.0 - * @param label The display label for the counter. - */ - count(label?: string): void; - /** - * Resets the internal counter specific to `label`. - * - * ```js - * > console.count('abc'); - * abc: 1 - * undefined - * > console.countReset('abc'); - * undefined - * > console.count('abc'); - * abc: 1 - * undefined - * > - * ``` - * @since v8.3.0 - * @param label The display label for the counter. - */ - countReset(label?: string): void; - /** - * The `console.debug()` function is an alias for {@link log}. 
- * @since v8.0.0 - */ - debug(message?: any, ...optionalParams: any[]): void; - /** - * Uses `util.inspect()` on `obj` and prints the resulting string to `stdout`. - * This function bypasses any custom `inspect()` function defined on `obj`. - * @since v0.1.101 - */ - dir(obj: any, options?: InspectOptions): void; - /** - * This method calls `console.log()` passing it the arguments received. - * This method does not produce any XML formatting. - * @since v8.0.0 - */ - dirxml(...data: any[]): void; - /** - * Prints to `stderr` with newline. Multiple arguments can be passed, with the - * first used as the primary message and all additional used as substitution - * values similar to [`printf(3)`](http://man7.org/linux/man-pages/man3/printf.3.html) (the arguments are all passed to `util.format()`). - * - * ```js - * const code = 5; - * console.error('error #%d', code); - * // Prints: error #5, to stderr - * console.error('error', code); - * // Prints: error 5, to stderr - * ``` - * - * If formatting elements (e.g. `%d`) are not found in the first string then `util.inspect()` is called on each argument and the resulting string - * values are concatenated. See `util.format()` for more information. - * @since v0.1.100 - */ - error(message?: any, ...optionalParams: any[]): void; - /** - * Increases indentation of subsequent lines by spaces for `groupIndentation`length. - * - * If one or more `label`s are provided, those are printed first without the - * additional indentation. - * @since v8.5.0 - */ - group(...label: any[]): void; - /** - * An alias for {@link group}. - * @since v8.5.0 - */ - groupCollapsed(...label: any[]): void; - /** - * Decreases indentation of subsequent lines by spaces for `groupIndentation`length. - * @since v8.5.0 - */ - groupEnd(): void; - /** - * The `console.info()` function is an alias for {@link log}. - * @since v0.1.100 - */ - info(message?: any, ...optionalParams: any[]): void; - /** - * Prints to `stdout` with newline. Multiple arguments can be passed, with the - * first used as the primary message and all additional used as substitution - * values similar to [`printf(3)`](http://man7.org/linux/man-pages/man3/printf.3.html) (the arguments are all passed to `util.format()`). - * - * ```js - * const count = 5; - * console.log('count: %d', count); - * // Prints: count: 5, to stdout - * console.log('count:', count); - * // Prints: count: 5, to stdout - * ``` - * - * See `util.format()` for more information. - * @since v0.1.100 - */ - log(message?: any, ...optionalParams: any[]): void; - /** - * Try to construct a table with the columns of the properties of `tabularData`(or use `properties`) and rows of `tabularData` and log it. Falls back to just - * logging the argument if it can’t be parsed as tabular. - * - * ```js - * // These can't be parsed as tabular data - * console.table(Symbol()); - * // Symbol() - * - * console.table(undefined); - * // undefined - * - * console.table([{ a: 1, b: 'Y' }, { a: 'Z', b: 2 }]); - * // ┌─────────┬─────┬─────┐ - * // │ (index) │ a │ b │ - * // ├─────────┼─────┼─────┤ - * // │ 0 │ 1 │ 'Y' │ - * // │ 1 │ 'Z' │ 2 │ - * // └─────────┴─────┴─────┘ - * - * console.table([{ a: 1, b: 'Y' }, { a: 'Z', b: 2 }], ['a']); - * // ┌─────────┬─────┐ - * // │ (index) │ a │ - * // ├─────────┼─────┤ - * // │ 0 │ 1 │ - * // │ 1 │ 'Z' │ - * // └─────────┴─────┘ - * ``` - * @since v10.0.0 - * @param properties Alternate properties for constructing the table. 
- */ - table(tabularData: any, properties?: ReadonlyArray): void; - /** - * Starts a timer that can be used to compute the duration of an operation. Timers - * are identified by a unique `label`. Use the same `label` when calling {@link timeEnd} to stop the timer and output the elapsed time in - * suitable time units to `stdout`. For example, if the elapsed - * time is 3869ms, `console.timeEnd()` displays "3.869s". - * @since v0.1.104 - */ - time(label?: string): void; - /** - * Stops a timer that was previously started by calling {@link time} and - * prints the result to `stdout`: - * - * ```js - * console.time('100-elements'); - * for (let i = 0; i < 100; i++) {} - * console.timeEnd('100-elements'); - * // prints 100-elements: 225.438ms - * ``` - * @since v0.1.104 - */ - timeEnd(label?: string): void; - /** - * For a timer that was previously started by calling {@link time}, prints - * the elapsed time and other `data` arguments to `stdout`: - * - * ```js - * console.time('process'); - * const value = expensiveProcess1(); // Returns 42 - * console.timeLog('process', value); - * // Prints "process: 365.227ms 42". - * doExpensiveProcess2(value); - * console.timeEnd('process'); - * ``` - * @since v10.7.0 - */ - timeLog(label?: string, ...data: any[]): void; - /** - * Prints to `stderr` the string `'Trace: '`, followed by the `util.format()` formatted message and stack trace to the current position in the code. - * - * ```js - * console.trace('Show me'); - * // Prints: (stack trace will vary based on where trace is called) - * // Trace: Show me - * // at repl:2:9 - * // at REPLServer.defaultEval (repl.js:248:27) - * // at bound (domain.js:287:14) - * // at REPLServer.runBound [as eval] (domain.js:300:12) - * // at REPLServer. (repl.js:412:12) - * // at emitOne (events.js:82:20) - * // at REPLServer.emit (events.js:169:7) - * // at REPLServer.Interface._onLine (readline.js:210:10) - * // at REPLServer.Interface._line (readline.js:549:8) - * // at REPLServer.Interface._ttyWrite (readline.js:826:14) - * ``` - * @since v0.1.104 - */ - trace(message?: any, ...optionalParams: any[]): void; - /** - * The `console.warn()` function is an alias for {@link error}. - * @since v0.1.100 - */ - warn(message?: any, ...optionalParams: any[]): void; - // --- Inspector mode only --- - /** - * This method does not display anything unless used in the inspector. - * Starts a JavaScript CPU profile with an optional label. - */ - profile(label?: string): void; - /** - * This method does not display anything unless used in the inspector. - * Stops the current JavaScript CPU profiling session if one has been started and prints the report to the Profiles panel of the inspector. - */ - profileEnd(label?: string): void; - /** - * This method does not display anything unless used in the inspector. - * Adds an event with the label `label` to the Timeline panel of the inspector. - */ - timeStamp(label?: string): void; - } - /** - * The `console` module provides a simple debugging console that is similar to the - * JavaScript console mechanism provided by web browsers. - * - * The module exports two specific components: - * - * * A `Console` class with methods such as `console.log()`, `console.error()` and`console.warn()` that can be used to write to any Node.js stream. - * * A global `console` instance configured to write to `process.stdout` and `process.stderr`. The global `console` can be used without calling`require('console')`. 
- * - * _**Warning**_: The global console object's methods are neither consistently - * synchronous like the browser APIs they resemble, nor are they consistently - * asynchronous like all other Node.js streams. See the `note on process I/O` for - * more information. - * - * Example using the global `console`: - * - * ```js - * console.log('hello world'); - * // Prints: hello world, to stdout - * console.log('hello %s', 'world'); - * // Prints: hello world, to stdout - * console.error(new Error('Whoops, something bad happened')); - * // Prints error message and stack trace to stderr: - * // Error: Whoops, something bad happened - * // at [eval]:5:15 - * // at Script.runInThisContext (node:vm:132:18) - * // at Object.runInThisContext (node:vm:309:38) - * // at node:internal/process/execution:77:19 - * // at [eval]-wrapper:6:22 - * // at evalScript (node:internal/process/execution:76:60) - * // at node:internal/main/eval_string:23:3 - * - * const name = 'Will Robinson'; - * console.warn(`Danger ${name}! Danger!`); - * // Prints: Danger Will Robinson! Danger!, to stderr - * ``` - * - * Example using the `Console` class: - * - * ```js - * const out = getStreamSomehow(); - * const err = getStreamSomehow(); - * const myConsole = new console.Console(out, err); - * - * myConsole.log('hello world'); - * // Prints: hello world, to out - * myConsole.log('hello %s', 'world'); - * // Prints: hello world, to out - * myConsole.error(new Error('Whoops, something bad happened')); - * // Prints: [Error: Whoops, something bad happened], to err - * - * const name = 'Will Robinson'; - * myConsole.warn(`Danger ${name}! Danger!`); - * // Prints: Danger Will Robinson! Danger!, to err - * ``` - * @see [source](https://github.com/nodejs/node/blob/v16.4.2/lib/console.js) - */ - namespace console { - interface ConsoleConstructorOptions { - stdout: NodeJS.WritableStream; - stderr?: NodeJS.WritableStream | undefined; - ignoreErrors?: boolean | undefined; - colorMode?: boolean | 'auto' | undefined; - inspectOptions?: InspectOptions | undefined; - /** - * Set group indentation - * @default 2 - */ - groupIndentation?: number | undefined; - } - interface ConsoleConstructor { - prototype: Console; - new (stdout: NodeJS.WritableStream, stderr?: NodeJS.WritableStream, ignoreErrors?: boolean): Console; - new (options: ConsoleConstructorOptions): Console; - } - } - var console: Console; - } - export = globalThis.console; -} diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Ableton Live Suite 10.0.1 !NEW! Keygen - [CrackzSoft.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Ableton Live Suite 10.0.1 !NEW! Keygen - [CrackzSoft.md deleted file mode 100644 index 30cae14e447d4adb8c1c5cdedad2f0377cd59bf9..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Ableton Live Suite 10.0.1 !NEW! Keygen - [CrackzSoft.md +++ /dev/null @@ -1,6 +0,0 @@ -

        Ableton Live Suite 10.0.1 Keygen - [CrackzSoft


        Download Zip »»» https://urlgoal.com/2uCMzw



        -
        -caino sanchez semiologia cardiovascular pdf descargar gratis, caino ... cardiovascular pdf lo q .. descargar libros elige tu propia aventura . 1fdad05405
        -
        -
        -

        diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Cardos Api V3 2 Download.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Cardos Api V3 2 Download.md deleted file mode 100644 index 636e8e7afdb1daae05bd5452e860b29ea16cfd95..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Cardos Api V3 2 Download.md +++ /dev/null @@ -1,63 +0,0 @@ - -

        Cardos Api V3 2 Download: A Guide to Using Smart Cards and Security Tokens

        -

        If you are looking for a way to use smart cards and security tokens in various applications, such as user authentication, data encryption, and digital signatures, you might want to consider Cardos Api V3 2 Download. Cardos Api is a software product that enables the use of Cardos smart cards and security tokens in a variety of standard applications. Cardos Api supports standard cryptographic interfaces, such as PKCS#11, CAPI, and CTK, and provides a flexible PKCS#15 file system on the smart card. In this article, we will explain what Cardos Api is, how it works, and where you can download it.

        -

        Cardos Api V3 2 Download


        Download →→→ https://urlgoal.com/2uCMlL



        -

        What is Cardos Api?

        -

        Cardos Api is a software product that enables the use of Cardos smart cards and security tokens in various applications. Cardos smart cards and security tokens are based on Infineon chips and the high-end Cardos operating system, which meets the strictest quality requirements and is compatible with all relevant smart card standards. Cardos smart cards and security tokens offer a multitude of applications, such as eID, ePassports, citizen cards, health insurance and health professional cards, employee badges, driver's licenses, signature cards, and loyalty cards.

        -

        Cardos Api provides powerful implementations of the two standard application interfaces for cryptographic services: PKCS#11 (Cryptographic Token Interface) and support of Microsoft CAPI through Cardos Api Minidriver. Via the CAPI interface under Microsoft Windows, Cardos Api supports key and certificate management for applications which is seamlessly integrated in the operating system. The PKCS#11 interface allows applications under Windows, Linux and macOS to use the Cardos Api functionalities. Cardos Api also contains a CryptoTokenKit (CTK) to easily access keys and certificates on Cardos smart cards with native macOS applications. Various applications can access the same key material via both interfaces simultaneously.

        -

        Cardos Api provides a standard-based dynamic PKCS#15 file system on the smart card which can be flexibly customized according to customer requirements. Thus Cardos Api enables simple and efficient use of Cardos smart cards with cryptographic keys and certificates in numerous applications. Support of various operating systems, use of international standards and the realization of state-of-the-art cryptographic algorithms ensure sustainability for the future. The option to enter PINs via a PinPad reader (secure PIN entry, SPE) protects against eavesdropping of PINs on the computer.

        -

        How does Cardos Api work?

        -

        Cardos Api works by providing a software layer between the application and the smart card or security token. The application can use the standard cryptographic interfaces (PKCS#11 or CAPI) to access the functions of the smart card or security token without knowing the details of the underlying hardware or operating system. Cardos Api handles the communication with the smart card or security token and performs the necessary operations on the PKCS#15 file system.

        -

        -

        The PKCS#15 file system is a standard format for storing cryptographic objects (such as keys and certificates) on a smart card or security token. It consists of a logical directory structure that organizes the objects into different categories (such as private keys, public keys, certificates, etc.). Each object has an associated set of attributes that define its properties (such as label, ID, usage flags, etc.). The PKCS#15 file system can be customized according to customer requirements by using different parameters (such as file size, access conditions, etc.).

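        To make this concrete, here is a minimal sketch in Python that lists the certificates and private keys a PKCS#11 module exposes. It uses the third-party PyKCS11 wrapper rather than anything shipped with Cardos Api, and the module path and PIN below are placeholder assumptions for illustration only.

```python
# Minimal sketch: enumerate objects on a smart card through its PKCS#11 module.
# Assumptions: the module path and PIN are placeholders, not real Cardos Api values.
import PyKCS11

pkcs11 = PyKCS11.PyKCS11Lib()
pkcs11.load("/usr/lib/libcardos11.so")          # hypothetical path to the vendor PKCS#11 library

slot = pkcs11.getSlotList(tokenPresent=True)[0]  # first reader with a card inserted
session = pkcs11.openSession(slot)
session.login("1234")                            # placeholder user PIN

# PKCS#11 search templates map onto the objects stored in the card's PKCS#15 structure.
certs = session.findObjects([(PyKCS11.CKA_CLASS, PyKCS11.CKO_CERTIFICATE)])
keys = session.findObjects([(PyKCS11.CKA_CLASS, PyKCS11.CKO_PRIVATE_KEY)])
print(f"certificates on card: {len(certs)}")
print(f"private keys on card:  {len(keys)}")

session.logout()
session.closeSession()
```

        The same objects are visible to CAPI-based Windows applications through the minidriver, which is what allows both interfaces to share one set of keys.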
        -

        Where can you download Cardos Api?

        -

        If you are interested in Cardos Api V3 2 Download, you can find it on the official website of Atos, which is the provider of Cardos solutions. Atos is a global leader in digital transformation with over 110,000 employees in 73 countries and annual revenue of over € 12 billion. Atos offers a range of cybersecurity solutions, including IoT and OT security, smart card solutions, identity management solutions, encryption solutions, and more.

        -

        To download Cardos Api V3 2, you need to register on the Atos website and provide some information about yourself and your organization. You will then receive an email with a link to download the software package. The software package includes an installer that will guide you through the installation process. You will also need to have a valid license key to activate the software.

        -

        Cardos Api V3 2 is available for all common operating systems: Windows, Linux, and macOS. You can choose the version that suits your needs and preferences. You can also find documentation and support resources on the Atos website to help you with using Cardos Api.

        -

        Conclusion

        -

        Cardos Api V3 2 Download is a software product that enables the use of Cardos smart cards and security tokens in various applications. It supports standard cryptographic interfaces, such as PKCS#11 and CAPI, and provides a flexible PKCS#15 file system on the smart card. It works by providing a software layer between the application and the smart card or security token that handles the communication and operations. You can download it from the official website of Atos after registering and providing some information. You will also need a valid license key to activate it.

        -

        If you are looking for a way to use smart cards and security tokens in your business or personal projects, you might want to consider Cardos Api V3 2 Download. It is powerful integration software that offers great convenience by supporting technical standards and providing sophisticated functionality.

        -

        What are the benefits of Cardos Api?

        -

        Cardos Api offers many benefits for users who want to use smart cards and security tokens in their applications. Some of the benefits are:

        -
          -
        • Cardos Api enables efficient, user-friendly, and simple implementation of smart cards and security tokens in a variety of application scenarios, such as system login, web authentication, or secure email.
        • -
        • Cardos Api supports various operating systems, such as Windows, Linux, and macOS, and is compatible with international standards, such as PKCS#11, CAPI, CTK, and PKCS#15.
        • -
        • Cardos Api provides state-of-the-art cryptographic algorithms and functions, such as RSA, ECC, AES, SHA, and more.
        • -
        • Cardos Api allows the use of PinPad readers to protect against eavesdropping of PINs on the computer.
        • -
        • Cardos Api combined with the secure smart card operating system CardOS provides the perfect foundation for ID cards in different industries, especially in the public sector and in the healthcare sector.
        • -
        -

        How to use Cardos Api?

        -

        To use Cardos Api, you need to have a Cardos smart card or security token, a smart card reader, and a valid license key. You also need to download and install Cardos Api on your computer. Here are the steps to use Cardos Api:

        -
          -
        1. Download Cardos Api V3 2 from the Atos website. You will need to register and provide some information to get the download link.
        2. -
        3. Run the installer and follow the instructions to install Cardos Api on your computer.
        4. -
        5. Insert your Cardos smart card or security token into the smart card reader connected to your computer.
        6. -
        7. Enter your license key to activate Cardos Api.
        8. -
        9. Select the application interface that you want to use: PKCS#11 or CAPI. You can also use both interfaces simultaneously.
        10. -
        11. Use your application to access the functions of the smart card or security token via Cardos Api.
        12. -
        -

        You can also use Cardos Api to manage your smart card or security token, such as creating or deleting objects, changing PINs, or updating firmware. You can find more information on how to use Cardos Api in the documentation and support resources on the Atos website.

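        Token management can go through the same interface. The sketch below changes the user PIN via PKCS#11; again it relies on the third-party PyKCS11 wrapper, and the module path and PIN values are placeholders rather than anything documented for Cardos Api.

```python
# Minimal sketch: change the user PIN on a token through PKCS#11.
# Assumptions: module path and both PINs are placeholders for illustration only.
import PyKCS11

pkcs11 = PyKCS11.PyKCS11Lib()
pkcs11.load("/usr/lib/libcardos11.so")          # hypothetical path to the vendor module

slot = pkcs11.getSlotList(tokenPresent=True)[0]
# PIN management needs a read/write session.
session = pkcs11.openSession(slot, PyKCS11.CKF_SERIAL_SESSION | PyKCS11.CKF_RW_SESSION)

session.login("1234")                            # authenticate with the current PIN
session.setPin("1234", "5678")                   # C_SetPIN: old PIN, new PIN
session.logout()
session.closeSession()
```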
        -

        What are the features of Cardos Api?

        -

        Cardos Api has many features that make it a powerful integration software for smart cards and security tokens. Some of the features are:

        -
          -
        • Cardos Api supports various Cardos smart cards and security tokens, such as CardOS V5.3, CardOS V5.4, CardOS V5.5, CardOS FIDO2, CardOS IoT V5.4, and more.
        • -
        • Cardos Api supports various smart card readers, such as USB readers, Bluetooth readers, NFC readers, and PinPad readers.
        • -
        • Cardos Api supports various cryptographic algorithms and functions, such as RSA (up to 4096 bits), ECC (up to 521 bits), AES (128/192/256 bits), SHA-1 and SHA-2 (224/256/384/512 bits), ECDSA, ECDH, and more.
        • -
        • Cardos Api supports various certificate formats and standards, such as X.509, PKCS#12, CVC, and more.
        • -
        • Cardos Api supports various authentication methods and protocols, such as PIN, PUK, OTP, FIDO2, SAML, OATH, and more.
        • -
        -

        What are the use cases of Cardos Api?

        -

        Cardos Api can be used in various use cases that require smart cards and security tokens. Some of the use cases are:

        -
          -
        • User authentication: Cardos Api can be used to authenticate users for system login, web authentication, VPN access, or secure email. Users can use their Cardos smart card or security token to prove their identity and access the resources they need.
        • -
        • Data encryption: Cardos Api can be used to encrypt data using the keys stored on the Cardos smart card or security token. Users can protect their sensitive data from unauthorized access or tampering by using their Cardos smart card or security token.
        • -
        • Digital signatures: Cardos Api can be used to create digital signatures using the keys stored on the Cardos smart card or security token. Users can sign documents or transactions electronically with their Cardos smart card or security token and verify their authenticity and integrity (a minimal signing sketch follows this list).
        • -
        • ID cards: Cardos Api can be used to create ID cards using the Cardos smart card or security token. Users can store their personal information and credentials on their Cardos smart card or security token and use it for various purposes, such as e-government, e-healthcare, e-education, e-commerce, or e-travel.
        • -
        • IoT security: Cardos Api can be used to integrate cryptographic functions into IoT devices by embedding a secure element into an existing electronic control unit (ECU). Users can secure their IoT devices from cyberattacks by using their Cardos smart card or security token.
        • -
        -
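        To illustrate the digital-signature use case mentioned above, the sketch below signs a small payload with a private key that stays on the token. It uses the third-party PyKCS11 wrapper; the module path, PIN, key label, and the SHA256-RSA-PKCS mechanism are assumptions chosen for illustration, not values prescribed by Cardos Api.

```python
# Minimal sketch: create an RSA signature with a key stored on the token.
# Assumptions: module path, PIN, and key label are placeholders.
import PyKCS11

pkcs11 = PyKCS11.PyKCS11Lib()
pkcs11.load("/usr/lib/libcardos11.so")          # hypothetical path to the vendor module

session = pkcs11.openSession(pkcs11.getSlotList(tokenPresent=True)[0])
session.login("1234")                            # placeholder user PIN

# Locate a private key by its label (the label "signature key" is assumed here).
key = session.findObjects([
    (PyKCS11.CKA_CLASS, PyKCS11.CKO_PRIVATE_KEY),
    (PyKCS11.CKA_LABEL, "signature key"),
])[0]

data = b"document to be signed"
mechanism = PyKCS11.Mechanism(PyKCS11.CKM_SHA256_RSA_PKCS, None)
signature = bytes(session.sign(key, data, mechanism))
print(f"signature length: {len(signature)} bytes")

session.logout()
session.closeSession()
```

        Because the private key never leaves the card, only the signing request and the resulting signature cross the PKCS#11 boundary, which is what makes this pattern suitable for legally binding electronic signatures.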

        Conclusion

        -

        Cardos Api V3 2 Download is a software product that enables the use of Cardos smart cards and security tokens in various applications. It supports standard cryptographic interfaces, such as PKCS#11 and CAPI, and provides a flexible PKCS#15 file system on the smart card. It works by providing a software layer between the application and the smart card or security token that handles the communication and operations. You can download it from the official website of Atos after registering and providing some information. You will also need a valid license key to activate it.

        -

        Cardos Api offers many benefits and features for users who want to use smart cards and security tokens in their applications. It enables efficient, user-friendly, and simple implementation of smart cards and security tokens in a variety of application scenarios, such as user authentication, data encryption, digital signatures, ID cards, and IoT security. It supports various operating systems, smart card readers, cryptographic algorithms and functions, certificate formats and standards, authentication methods and protocols, and Cardos smart cards and security tokens.

        -

        If you are looking for powerful integration software for smart cards and security tokens, you might want to consider Cardos Api V3 2 Download. It is proven, widely used software that seamlessly integrates Cardos smart cards and security tokens in all relevant system environments. With this powerful package, you can adapt your infrastructure flexibly at any time to keep smart card administration uncomplicated, cost-efficient, and secure.

        3cee63e6c2
        -
        -
        \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Ecut 5 UPD Keygen For Corel X5.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Ecut 5 UPD Keygen For Corel X5.md deleted file mode 100644 index c6587863860b9dd87c895d4d3d94d7f721a3d26b..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Ecut 5 UPD Keygen For Corel X5.md +++ /dev/null @@ -1,28 +0,0 @@ -

        Ecut 5 Keygen For Corel X5


        Download Ziphttps://urlgoal.com/2uCKWU



        - -4. Fancube – Create custom videos using your photos - -Fancube is also one of the best video downloader. It enables you to convert videos from websites, such as Netflix, BBC, YouTube, Hulu and Vimeo. You can download your favorite movies and TV shows in this app as MP4 and M4V files. - -5. Movies4Video – Select and download the best movies to watch offline - -Movies4Video is an offline movie downloader which can be used to download your favorite movies. You can find many videos from different websites including Netflix, YouTube, Vimeo, BBC iPlayer, and many more. The best thing about this app is that it automatically downloads the best video in a different quality according to your internet connection speed. You can watch and watch your favorite movies without any internet connection. - -6. VLC Media Player – Easily watch videos offline - -If you have an iPhone or iPad, VLC is a great video player. It has the ability to download and watch videos offline. You don’t need any internet connection to watch your favorite movies. VLC player is free to download and use, but you will need a paid license if you want to download more than 10 videos. If you want to use VLC media player for iOS, then you should use the official app from VLC media player. - -7. The Loop – Watch and download your favorite videos from YouTube - -The Loop is one of the best video downloader. You can watch your favorite videos without any data or internet connection. It enables you to watch videos from Vimeo and YouTube. This app has several features, such as watching a video offline, grabbing the best parts of the video, streaming any videos, watching YouTube videos in full screen, and more. - -8. Videvo – Download and watch videos - -Videvo is a unique app that converts videos to mobile and desktop formats. You can use it to download any video you want from YouTube, Netflix, BBC iPlayer and many other websites. It also enables you to convert videos into iPod, iPhone, Android, and iPad formats. The great thing about Videvo is that it saves you a lot of space on your device. - -9. VLC (VideoLAN) – Download and watch videos - -VLC is a popular video downloader. It has an ability to convert your videos and play them in any format such as DVD, MP4, and MP3. You 4fefd39f24
        -
        -
        -

        diff --git a/spaces/reimari/rvc-aa99/config.py b/spaces/reimari/rvc-aa99/config.py deleted file mode 100644 index c0c16e0017efbcaf250cb539a1d0edb4e83575e4..0000000000000000000000000000000000000000 --- a/spaces/reimari/rvc-aa99/config.py +++ /dev/null @@ -1,88 +0,0 @@ -########################硬件参数######################## - -# 填写cuda:x, cpu 或 mps, x指代第几张卡,只支持 N卡 / Apple Silicon 加速 -device = "cuda:0" - -# 9-10-20-30-40系显卡无脑True,不影响质量,>=20显卡开启有加速 -is_half = True - -# 默认0用上所有线程,写数字限制CPU资源使用 -n_cpu = 0 - -########################硬件参数######################## - - -##################下为参数处理逻辑,勿动################## - -########################命令行参数######################## -import argparse - -parser = argparse.ArgumentParser() -parser.add_argument("--port", type=int, default=7865, help="Listen port") -parser.add_argument("--pycmd", type=str, default="python", help="Python command") -parser.add_argument("--colab", action="store_true", help="Launch in colab") -parser.add_argument( - "--noparallel", action="store_true", help="Disable parallel processing" -) -parser.add_argument( - "--noautoopen", action="store_true", help="Do not open in browser automatically" -) -cmd_opts, unknown = parser.parse_known_args() - -python_cmd = cmd_opts.pycmd -listen_port = cmd_opts.port -iscolab = cmd_opts.colab -noparallel = cmd_opts.noparallel -noautoopen = cmd_opts.noautoopen -########################命令行参数######################## - -import sys -import torch - - -# has_mps is only available in nightly pytorch (for now) and MasOS 12.3+. -# check `getattr` and try it for compatibility -def has_mps() -> bool: - if sys.platform != "darwin": - return False - else: - if not getattr(torch, "has_mps", False): - return False - try: - torch.zeros(1).to(torch.device("mps")) - return True - except Exception: - return False - - -if not torch.cuda.is_available(): - if has_mps(): - print("没有发现支持的N卡, 使用MPS进行推理") - device = "mps" - else: - print("没有发现支持的N卡, 使用CPU进行推理") - device = "cpu" - is_half = False - -if device not in ["cpu", "mps"]: - gpu_name = torch.cuda.get_device_name(int(device.split(":")[-1])) - if "16" in gpu_name or "MX" in gpu_name: - print("16系显卡/MX系显卡强制单精度") - is_half = False - -from multiprocessing import cpu_count - -if n_cpu == 0: - n_cpu = cpu_count() -if is_half: - # 6G显存配置 - x_pad = 3 - x_query = 10 - x_center = 60 - x_max = 65 -else: - # 5G显存配置 - x_pad = 1 - x_query = 6 - x_center = 38 - x_max = 41 diff --git a/spaces/ritwikbiswas/incoder-complete/tokenizers_patch.py b/spaces/ritwikbiswas/incoder-complete/tokenizers_patch.py deleted file mode 100644 index cf577f8fe9fe509cb3c6c37d2e8dc3b12f7c6194..0000000000000000000000000000000000000000 --- a/spaces/ritwikbiswas/incoder-complete/tokenizers_patch.py +++ /dev/null @@ -1,26 +0,0 @@ -import os -import sys -import subprocess - - -print("Getting rustup") -subprocess.run( - "curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y", - shell=True, -) -print("Got rustup") -myenv = os.environ.copy() -myenv["PATH"] = os.path.expanduser("~/.cargo/bin:") + myenv["PATH"] -print("RUSTC", os.path.isfile(os.path.expanduser("~/.cargo/bin/rustc"))) -subprocess.run("rustc --version", shell=True, env=myenv) -subprocess.run( - "pip install -e git+https://github.com/huggingface/tokenizers/#egg=tokenizers\&subdirectory=bindings/python", - shell=True, - env=myenv, -) -sys.path.append( - os.path.join(os.getcwd(), "src", "tokenizers", "bindings", "python", "py_src") -) - - -import tokenizers diff --git 
a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/backbones/regnet.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/backbones/regnet.py deleted file mode 100644 index 63adc3c1deb3b48193c243eb4ec5178a0b62103b..0000000000000000000000000000000000000000 --- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/backbones/regnet.py +++ /dev/null @@ -1,356 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings - -import numpy as np -import torch.nn as nn -from mmcv.cnn import build_conv_layer, build_norm_layer - -from ..builder import BACKBONES -from .resnet import ResNet -from .resnext import Bottleneck - - -@BACKBONES.register_module() -class RegNet(ResNet): - """RegNet backbone. - - More details can be found in `paper `_ . - - Args: - arch (dict): The parameter of RegNets. - - - w0 (int): initial width - - wa (float): slope of width - - wm (float): quantization parameter to quantize the width - - depth (int): depth of the backbone - - group_w (int): width of group - - bot_mul (float): bottleneck ratio, i.e. expansion of bottleneck. - strides (Sequence[int]): Strides of the first block of each stage. - base_channels (int): Base channels after stem layer. - in_channels (int): Number of input image channels. Default: 3. - dilations (Sequence[int]): Dilation of each stage. - out_indices (Sequence[int]): Output from which stages. - style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two - layer is the 3x3 conv layer, otherwise the stride-two layer is - the first 1x1 conv layer. - frozen_stages (int): Stages to be frozen (all param fixed). -1 means - not freezing any parameters. - norm_cfg (dict): dictionary to construct and config norm layer. - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. - zero_init_residual (bool): whether to use zero init for last norm layer - in resblocks to let them behave as identity. - pretrained (str, optional): model pretrained path. Default: None - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - - Example: - >>> from mmdet.models import RegNet - >>> import torch - >>> self = RegNet( - arch=dict( - w0=88, - wa=26.31, - wm=2.25, - group_w=48, - depth=25, - bot_mul=1.0)) - >>> self.eval() - >>> inputs = torch.rand(1, 3, 32, 32) - >>> level_outputs = self.forward(inputs) - >>> for level_out in level_outputs: - ... 
print(tuple(level_out.shape)) - (1, 96, 8, 8) - (1, 192, 4, 4) - (1, 432, 2, 2) - (1, 1008, 1, 1) - """ - arch_settings = { - 'regnetx_400mf': - dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0), - 'regnetx_800mf': - dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16, bot_mul=1.0), - 'regnetx_1.6gf': - dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18, bot_mul=1.0), - 'regnetx_3.2gf': - dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25, bot_mul=1.0), - 'regnetx_4.0gf': - dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23, bot_mul=1.0), - 'regnetx_6.4gf': - dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17, bot_mul=1.0), - 'regnetx_8.0gf': - dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23, bot_mul=1.0), - 'regnetx_12gf': - dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19, bot_mul=1.0), - } - - def __init__(self, - arch, - in_channels=3, - stem_channels=32, - base_channels=32, - strides=(2, 2, 2, 2), - dilations=(1, 1, 1, 1), - out_indices=(0, 1, 2, 3), - style='pytorch', - deep_stem=False, - avg_down=False, - frozen_stages=-1, - conv_cfg=None, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - dcn=None, - stage_with_dcn=(False, False, False, False), - plugins=None, - with_cp=False, - zero_init_residual=True, - pretrained=None, - init_cfg=None): - super(ResNet, self).__init__(init_cfg) - - # Generate RegNet parameters first - if isinstance(arch, str): - assert arch in self.arch_settings, \ - f'"arch": "{arch}" is not one of the' \ - ' arch_settings' - arch = self.arch_settings[arch] - elif not isinstance(arch, dict): - raise ValueError('Expect "arch" to be either a string ' - f'or a dict, got {type(arch)}') - - widths, num_stages = self.generate_regnet( - arch['w0'], - arch['wa'], - arch['wm'], - arch['depth'], - ) - # Convert to per stage format - stage_widths, stage_blocks = self.get_stages_from_blocks(widths) - # Generate group widths and bot muls - group_widths = [arch['group_w'] for _ in range(num_stages)] - self.bottleneck_ratio = [arch['bot_mul'] for _ in range(num_stages)] - # Adjust the compatibility of stage_widths and group_widths - stage_widths, group_widths = self.adjust_width_group( - stage_widths, self.bottleneck_ratio, group_widths) - - # Group params by stage - self.stage_widths = stage_widths - self.group_widths = group_widths - self.depth = sum(stage_blocks) - self.stem_channels = stem_channels - self.base_channels = base_channels - self.num_stages = num_stages - assert num_stages >= 1 and num_stages <= 4 - self.strides = strides - self.dilations = dilations - assert len(strides) == len(dilations) == num_stages - self.out_indices = out_indices - assert max(out_indices) < num_stages - self.style = style - self.deep_stem = deep_stem - self.avg_down = avg_down - self.frozen_stages = frozen_stages - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.with_cp = with_cp - self.norm_eval = norm_eval - self.dcn = dcn - self.stage_with_dcn = stage_with_dcn - if dcn is not None: - assert len(stage_with_dcn) == num_stages - self.plugins = plugins - self.zero_init_residual = zero_init_residual - self.block = Bottleneck - expansion_bak = self.block.expansion - self.block.expansion = 1 - self.stage_blocks = stage_blocks[:num_stages] - - self._make_stem_layer(in_channels, stem_channels) - - block_init_cfg = None - assert not (init_cfg and pretrained), \ - 'init_cfg and pretrained cannot be specified at the same time' - if isinstance(pretrained, str): - warnings.warn('DeprecationWarning: pretrained is deprecated, ' - 'please use 
"init_cfg" instead') - self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) - elif pretrained is None: - if init_cfg is None: - self.init_cfg = [ - dict(type='Kaiming', layer='Conv2d'), - dict( - type='Constant', - val=1, - layer=['_BatchNorm', 'GroupNorm']) - ] - if self.zero_init_residual: - block_init_cfg = dict( - type='Constant', val=0, override=dict(name='norm3')) - else: - raise TypeError('pretrained must be a str or None') - - self.inplanes = stem_channels - self.res_layers = [] - for i, num_blocks in enumerate(self.stage_blocks): - stride = self.strides[i] - dilation = self.dilations[i] - group_width = self.group_widths[i] - width = int(round(self.stage_widths[i] * self.bottleneck_ratio[i])) - stage_groups = width // group_width - - dcn = self.dcn if self.stage_with_dcn[i] else None - if self.plugins is not None: - stage_plugins = self.make_stage_plugins(self.plugins, i) - else: - stage_plugins = None - - res_layer = self.make_res_layer( - block=self.block, - inplanes=self.inplanes, - planes=self.stage_widths[i], - num_blocks=num_blocks, - stride=stride, - dilation=dilation, - style=self.style, - avg_down=self.avg_down, - with_cp=self.with_cp, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - dcn=dcn, - plugins=stage_plugins, - groups=stage_groups, - base_width=group_width, - base_channels=self.stage_widths[i], - init_cfg=block_init_cfg) - self.inplanes = self.stage_widths[i] - layer_name = f'layer{i + 1}' - self.add_module(layer_name, res_layer) - self.res_layers.append(layer_name) - - self._freeze_stages() - - self.feat_dim = stage_widths[-1] - self.block.expansion = expansion_bak - - def _make_stem_layer(self, in_channels, base_channels): - self.conv1 = build_conv_layer( - self.conv_cfg, - in_channels, - base_channels, - kernel_size=3, - stride=2, - padding=1, - bias=False) - self.norm1_name, norm1 = build_norm_layer( - self.norm_cfg, base_channels, postfix=1) - self.add_module(self.norm1_name, norm1) - self.relu = nn.ReLU(inplace=True) - - def generate_regnet(self, - initial_width, - width_slope, - width_parameter, - depth, - divisor=8): - """Generates per block width from RegNet parameters. - - Args: - initial_width ([int]): Initial width of the backbone - width_slope ([float]): Slope of the quantized linear function - width_parameter ([int]): Parameter used to quantize the width. - depth ([int]): Depth of the backbone. - divisor (int, optional): The divisor of channels. Defaults to 8. - - Returns: - list, int: return a list of widths of each stage and the number \ - of stages - """ - assert width_slope >= 0 - assert initial_width > 0 - assert width_parameter > 1 - assert initial_width % divisor == 0 - widths_cont = np.arange(depth) * width_slope + initial_width - ks = np.round( - np.log(widths_cont / initial_width) / np.log(width_parameter)) - widths = initial_width * np.power(width_parameter, ks) - widths = np.round(np.divide(widths, divisor)) * divisor - num_stages = len(np.unique(widths)) - widths, widths_cont = widths.astype(int).tolist(), widths_cont.tolist() - return widths, num_stages - - @staticmethod - def quantize_float(number, divisor): - """Converts a float to closest non-zero int divisible by divisor. - - Args: - number (int): Original number to be quantized. - divisor (int): Divisor used to quantize the number. - - Returns: - int: quantized number that is divisible by devisor. 
- """ - return int(round(number / divisor) * divisor) - - def adjust_width_group(self, widths, bottleneck_ratio, groups): - """Adjusts the compatibility of widths and groups. - - Args: - widths (list[int]): Width of each stage. - bottleneck_ratio (float): Bottleneck ratio. - groups (int): number of groups in each stage - - Returns: - tuple(list): The adjusted widths and groups of each stage. - """ - bottleneck_width = [ - int(w * b) for w, b in zip(widths, bottleneck_ratio) - ] - groups = [min(g, w_bot) for g, w_bot in zip(groups, bottleneck_width)] - bottleneck_width = [ - self.quantize_float(w_bot, g) - for w_bot, g in zip(bottleneck_width, groups) - ] - widths = [ - int(w_bot / b) - for w_bot, b in zip(bottleneck_width, bottleneck_ratio) - ] - return widths, groups - - def get_stages_from_blocks(self, widths): - """Gets widths/stage_blocks of network at each stage. - - Args: - widths (list[int]): Width in each stage. - - Returns: - tuple(list): width and depth of each stage - """ - width_diff = [ - width != width_prev - for width, width_prev in zip(widths + [0], [0] + widths) - ] - stage_widths = [ - width for width, diff in zip(widths, width_diff[:-1]) if diff - ] - stage_blocks = np.diff([ - depth for depth, diff in zip(range(len(width_diff)), width_diff) - if diff - ]).tolist() - return stage_widths, stage_blocks - - def forward(self, x): - """Forward function.""" - x = self.conv1(x) - x = self.norm1(x) - x = self.relu(x) - - outs = [] - for i, layer_name in enumerate(self.res_layers): - res_layer = getattr(self, layer_name) - x = res_layer(x) - if i in self.out_indices: - outs.append(x) - return tuple(outs) diff --git a/spaces/rorallitri/biomedical-language-models/logs/Deadisland13patchcrack Extra Quality.md b/spaces/rorallitri/biomedical-language-models/logs/Deadisland13patchcrack Extra Quality.md deleted file mode 100644 index 55fc981147b4cd769169e3da6a88fc8bb31e1ed5..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Deadisland13patchcrack Extra Quality.md +++ /dev/null @@ -1,6 +0,0 @@ -

        deadisland13patchcrack


        Download Filehttps://tinurll.com/2uzlHs



        -
        -Tamil Hd Video Songs 1080p Blu The Jungle Book DOWNLOAD. 02cac431c2 sdc40 yamatake manual. 02cac431c2. deadisland13patchcrack. read more. 1fdad05405
        -
        -
        -

        diff --git a/spaces/rorallitri/biomedical-language-models/logs/DgFlick Book Xpress Pro 7.1.0.0 Crack The Ultimate Tool for Book Creation and Editing.md b/spaces/rorallitri/biomedical-language-models/logs/DgFlick Book Xpress Pro 7.1.0.0 Crack The Ultimate Tool for Book Creation and Editing.md deleted file mode 100644 index ed7557e56c7668423a199a9c4da80667014cc499..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/DgFlick Book Xpress Pro 7.1.0.0 Crack The Ultimate Tool for Book Creation and Editing.md +++ /dev/null @@ -1,6 +0,0 @@ -

        DgFlick Book Xpress Pro 7.1.0.0 Crack


        Download File ★★★ https://tinurll.com/2uzmMY



        - - aaccfb2cb3
        -
        -
        -

        diff --git a/spaces/rorallitri/biomedical-language-models/logs/Lorganizzazione Ringrazia Firmato Il Santo 1 Movie [BEST] Download 720p.md b/spaces/rorallitri/biomedical-language-models/logs/Lorganizzazione Ringrazia Firmato Il Santo 1 Movie [BEST] Download 720p.md deleted file mode 100644 index f9f2d43b1c022a66421e10051b8b0577bd5d673e..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Lorganizzazione Ringrazia Firmato Il Santo 1 Movie [BEST] Download 720p.md +++ /dev/null @@ -1,6 +0,0 @@ -

        Lorganizzazione Ringrazia Firmato Il Santo 1 Movie Download 720p


        DOWNLOAD > https://tinurll.com/2uzola



        -
        - 4d29de3e1b
        -
        -
        -

        diff --git a/spaces/runa91/barc_gradio/src/stacked_hourglass/utils/__init__.py b/spaces/runa91/barc_gradio/src/stacked_hourglass/utils/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/s1591428/README/README.md b/spaces/s1591428/README/README.md deleted file mode 100644 index 39ae6cc433daaba4f7834f98132daf9edc3d5e6c..0000000000000000000000000000000000000000 --- a/spaces/s1591428/README/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: 結合 DAO 與 NFT 之機器人公司的無限可能 -emoji: 🔥 -colorFrom: indigo -colorTo: gray -sdk: streamlit -pinned: false -license: apache-2.0 ---- - -Edit this `README.md` markdown file to author your organization card 🔥 \ No newline at end of file diff --git a/spaces/salti/arabic-question-paraphrasing/app.py b/spaces/salti/arabic-question-paraphrasing/app.py deleted file mode 100644 index a8086faac4983cd697b4bebd922bf2b2a9c1df30..0000000000000000000000000000000000000000 --- a/spaces/salti/arabic-question-paraphrasing/app.py +++ /dev/null @@ -1,73 +0,0 @@ -import gradio as gr -import torch -from transformers import AutoModelForSeq2SeqLM, AutoTokenizer - - -tokenizer = AutoTokenizer.from_pretrained( - "salti/arabic-t5-small-question-paraphrasing", use_fast=True -) - -model = AutoModelForSeq2SeqLM.from_pretrained( - "salti/arabic-t5-small-question-paraphrasing" -).eval() - -prompt = "أعد صياغة: " - - -@torch.inference_mode() -def paraphrase(question, num_beams, encoder_no_repeat_ngram_size): - question = prompt + question - input_ids = tokenizer(question, return_tensors="pt").input_ids - generated_tokens = ( - model.generate( - input_ids, - num_beams=num_beams, - encoder_no_repeat_ngram_size=encoder_no_repeat_ngram_size, - ) - .squeeze() - .cpu() - .numpy() - ) - return tokenizer.decode(generated_tokens, skip_special_tokens=True) - - -question = gr.inputs.Textbox(label="اكتب سؤالاً باللغة العربية") -num_beams = gr.inputs.Slider(1, 10, step=1, default=1, label="Beam size") -encoder_no_repeat_ngram_size = gr.inputs.Slider( - 0, - 10, - step=1, - default=3, - label="N-grams of this size won't be copied from the input (forces more diverse outputs)", -) - -outputs = gr.outputs.Textbox(label="السؤال بصيغة مختلفة") - -examples = [ - [ - "متى تم اختراع الكتابة؟", - 5, - 3, - ], - [ - "ما عدد حروف اللغة العربية؟", - 5, - 3, - ], - [ - "ما هو الذكاء الصنعي؟", - 5, - 3, - ], -] - -iface = gr.Interface( - fn=paraphrase, - inputs=[question, num_beams, encoder_no_repeat_ngram_size], - outputs=outputs, - examples=examples, - title="Arabic question paraphrasing", - theme="huggingface", -) - -iface.launch() diff --git a/spaces/sampath02061982/MyGenAi/README.md b/spaces/sampath02061982/MyGenAi/README.md deleted file mode 100644 index 8d1c5699b941705a60f02455bdc7e57387274dc0..0000000000000000000000000000000000000000 --- a/spaces/sampath02061982/MyGenAi/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: MyGenAi -emoji: 📈 -colorFrom: green -colorTo: indigo -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/sarinam/speaker-anonymization-gan/IMSToucan/Utility/__init__.py b/spaces/sarinam/speaker-anonymization-gan/IMSToucan/Utility/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/scedlatioru/img-to-music/example/Babys Trip To China Full Movie Download VERIFIED.md 
b/spaces/scedlatioru/img-to-music/example/Babys Trip To China Full Movie Download VERIFIED.md deleted file mode 100644 index e91e69a61dd43bbb8d489610b497ad2c879d53aa..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Babys Trip To China Full Movie Download VERIFIED.md +++ /dev/null @@ -1,6 +0,0 @@ -

        baby's trip to china full movie download


        DOWNLOAD 🗹 https://gohhs.com/2uEzpR



        -
        -Take advantage of cheap flights, great last minute vacations, and discount ... But now Typhoon TV has been released, you can enjoy movies and TV shows without any hassle. ... Top Today Free Download Android APK APPS And Games. ... Trinidad Office 16 Alcazar Street St. of China Hong Kong S. The Pirates of the ... 1fdad05405
        -
        -
        -

        diff --git a/spaces/scedlatioru/img-to-music/example/HD Online Player (Miss.Khiladi - The Perfect Player Mo).md b/spaces/scedlatioru/img-to-music/example/HD Online Player (Miss.Khiladi - The Perfect Player Mo).md deleted file mode 100644 index 3dd12006054a3b08c9d1fc56c87e1373d0cf25b6..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/HD Online Player (Miss.Khiladi - The Perfect Player Mo).md +++ /dev/null @@ -1,10 +0,0 @@ -

        HD Online Player (Miss.Khiladi - The Perfect Player Mo)


        Download Zip >> https://gohhs.com/2uEzsD



        -
        -The film features an ensemble cast of Biswa Kalyan Rath, Shriya Saxena, Umesh Shukla, Pankaj Tripathi, Kavita Seth and Neha Dubey. The film's story and screenplay are written by Praful K. Gandhe, while Balani Satyaranjan, Jitendra Sonawane, Ankit Anand and Manish Jha are the director, editor, cinematographer and actor in this movie, respectively. The film was released on 7 November 2016. - -Miss Khiladi The Perfect Player (2016) Full HD Online Player (Miss.Khiladi - The Perfect Player Mo) Download: ( Miss Khiladi The Perfect Player (2016) Hindi Movie Watch Online HD Print Download Watch Full Movie Miss Khiladi The Perfect Player (2016) Hindi Adult. 4fefd39f24
        -
        -
        -

        diff --git a/spaces/scedlatioru/img-to-music/example/Proshow Style Pack 5 Torrent ((NEW)).md b/spaces/scedlatioru/img-to-music/example/Proshow Style Pack 5 Torrent ((NEW)).md deleted file mode 100644 index 47f41a96c286f0ad6ee9eaeb1ef32c86fd79ee96..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Proshow Style Pack 5 Torrent ((NEW)).md +++ /dev/null @@ -1,18 +0,0 @@ -

        proshow style pack 5 torrent


        Download File >> https://gohhs.com/2uEzgB



        - -options(error=recover) - -It’s been well over a year since I put my first real blog post up here. Many people ask “Why have you been gone so long?”. Well, the truth is I have been taking care of business around here. With my grandmother becoming ill, it became necessary to be here more than just a few days a month. - -Then a couple months ago, I discovered how critical it is to take care of yourself. Working full-time at a really intense job and trying to run a side business, it’s hard to take care of yourself. I found myself getting sick a lot more than I used to and I was getting really tired of that. It was around this time that I went to the doctor for the first time in about 10 years and he diagnosed me with anemia. It was discovered I had been low for over a year. I was shocked. I thought I would never get sick again. But my doctor had a few things to say to me that really changed my life. - -I realized that not taking care of myself was not good for me. I was taking care of other people. That is not good for anyone. In many ways, I have been feeling good lately. It’s great to know that I don’t have to pretend anymore. I am taking care of me. - -I have a set of goals I am working toward. My first goal is to stay healthy. It’s not going to be easy. I will always get sick. There will be times where I just want to eat a big meal and feel really tired after. I will probably be on anti-biotics a lot of the time and sometimes steroids. My life is going to change drastically. I am used to having the ability to not feel sick, but as I find out more and more, that is not a normal life. I will have days where I feel great and other days where I feel terrible. I have to work on getting myself back to a state of feeling good. - -My second goal is to make some money. I have a passion for researching and writing about the subject of dogs. I want to publish a book and maybe even write for TV. I have the means to achieve this goal. This is a long-term goal. I have no guarantees but I am working towards it. - -My third goal is to make my life more meaningful. I want to feel like 4fefd39f24
        -
        -
        -

        diff --git a/spaces/scedlatioru/img-to-music/example/Smtown The Stage Eng Sub Full [EXCLUSIVE] Movie 13.md b/spaces/scedlatioru/img-to-music/example/Smtown The Stage Eng Sub Full [EXCLUSIVE] Movie 13.md deleted file mode 100644 index 5697e333db4fd575cae330c3a0ba230bf184ef6e..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Smtown The Stage Eng Sub Full [EXCLUSIVE] Movie 13.md +++ /dev/null @@ -1,10 +0,0 @@ -

        Smtown The Stage Eng Sub Full Movie 13


        Downloadhttps://gohhs.com/2uEzWC



        -
        - . . was received by the press more than 500 meters at the recent movie shooting location due to the impression of the wind. . . . Although the shooting in the movie, Suho has been injured in his leg and. . . . in addition, Minho lost his hair, which makes us laugh. . . . Source: TVXQ! Blogs "Suho: hair is not grown, my leg is stiff" and "Minho: even just thinking about the story, the hair on the head fell and fell". __________ Translated by: Moona Hwang (Editor-in-Chief, Moona Drama Blog) ___________________________________________Category: TVXQ! | TVXQ! News | Suho & Minho | *banners*Translator: Moona Hwang | Moona Hwang's Drama Blog | Drama News and Updates | K-Entertainment | Suho & Minho *animated. - -The main reason why we will not succeed is not because of anyone else. It is because we cannot find a good answer to life. When we are looking for a solution in the real world, we find that it can not be found, and when we look for a solution in the imaginary world, we find that it can not be found. The answer is that life is not an answer. Just like life is a place that is difficult to reach, the answer is also a place that is difficult to find. As long as we look in the wrong direction, the answer is still impossible to find. And after a while, we will give up looking for it because it is a place impossible to find. This is the answer to life. If we say that life is a place we can not find the answer, how can we speak of a world beyond death? So for all that, I can not believe in anything. Even if I try to explain how we live, I cannot. - -The main reason why we will not succeed is not because of anyone else. It is because we cannot find a good answer to life. When we are looking for a solution in the real world, we find that it can not be found, and when we look for a solution in the imaginary world, we find that it can not be found. The answer is that life is not an answer. Just like life is a place that is difficult to reach, the answer is also a place that is difficult to find. As 4fefd39f24
        -
        -
        -

        diff --git a/spaces/sczhou/CodeFormer/CodeFormer/basicsr/ops/fused_act/src/fused_bias_act.cpp b/spaces/sczhou/CodeFormer/CodeFormer/basicsr/ops/fused_act/src/fused_bias_act.cpp deleted file mode 100644 index 85ed0a79fb9c75f83470ac834090f03608d998ee..0000000000000000000000000000000000000000 --- a/spaces/sczhou/CodeFormer/CodeFormer/basicsr/ops/fused_act/src/fused_bias_act.cpp +++ /dev/null @@ -1,26 +0,0 @@ -// from https://github.com/rosinality/stylegan2-pytorch/blob/master/op/fused_bias_act.cpp -#include - - -torch::Tensor fused_bias_act_op(const torch::Tensor& input, - const torch::Tensor& bias, - const torch::Tensor& refer, - int act, int grad, float alpha, float scale); - -#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor") -#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") -#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) - -torch::Tensor fused_bias_act(const torch::Tensor& input, - const torch::Tensor& bias, - const torch::Tensor& refer, - int act, int grad, float alpha, float scale) { - CHECK_CUDA(input); - CHECK_CUDA(bias); - - return fused_bias_act_op(input, bias, refer, act, grad, alpha, scale); -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { - m.def("fused_bias_act", &fused_bias_act, "fused bias act (CUDA)"); -} diff --git a/spaces/segments-tobias/conex/espnet2/tts/feats_extract/abs_feats_extract.py b/spaces/segments-tobias/conex/espnet2/tts/feats_extract/abs_feats_extract.py deleted file mode 100644 index c4a459e5be7235026a880f1b776efdcd5ed8825d..0000000000000000000000000000000000000000 --- a/spaces/segments-tobias/conex/espnet2/tts/feats_extract/abs_feats_extract.py +++ /dev/null @@ -1,23 +0,0 @@ -from abc import ABC -from abc import abstractmethod -from typing import Any -from typing import Dict - -import torch -from typing import Tuple - - -class AbsFeatsExtract(torch.nn.Module, ABC): - @abstractmethod - def output_size(self) -> int: - raise NotImplementedError - - @abstractmethod - def get_parameters(self) -> Dict[str, Any]: - raise NotImplementedError - - @abstractmethod - def forward( - self, input: torch.Tensor, input_lengths: torch.Tensor - ) -> Tuple[torch.Tensor, torch.Tensor]: - raise NotImplementedError diff --git a/spaces/shgao/EditAnything/editany_test.py b/spaces/shgao/EditAnything/editany_test.py deleted file mode 100644 index 62f1b4f77cab3d163f8986f673cd919d3e99028c..0000000000000000000000000000000000000000 --- a/spaces/shgao/EditAnything/editany_test.py +++ /dev/null @@ -1,73 +0,0 @@ -# Edit Anything trained with Stable Diffusion + ControlNet + SAM + BLIP2 -import os -import gradio as gr -from diffusers.utils import load_image -from editany_lora import EditAnythingLoraModel, config_dict -from editany_demo import create_demo_template -from huggingface_hub import hf_hub_download, snapshot_download - - -def create_demo(process, process_image_click=None): - - examples = [ - [ - "dudou,1girl, beautiful face, solo, candle, brown hair, long hair, ,ulzzang-6500-v1.1,(raw photo:1.2),((photorealistic:1.4))best quality ,masterpiece, illustration, an extremely delicate and beautiful, extremely detailed ,CG ,unity ,8k wallpaper, Amazing, finely detail, masterpiece,best quality,official art,extremely detailed CG unity 8k wallpaper,absurdres, incredibly absurdres, huge filesize, ultra-detailed, highres, extremely detailed,beautiful detailed girl, extremely detailed eyes and face, beautiful detailed eyes,cinematic lighting,1girl,see-through,looking at viewer,full body,full-body 
shot,outdoors,arms behind back,(chinese clothes) ", - "(((mole))),sketches, (worst quality:2), (low quality:2), (normal quality:2), lowres, normal quality, ((monochrome)), ((grayscale)), skin spots, acnes, skin blemishes, bad anatomy,(long hair:1.4),DeepNegative,(fat:1.2),facing away, looking away,tilted head, lowres,bad anatomy,bad hands, text, error, missing fingers,extra digit, fewer digits, cropped, worstquality, low quality, normal quality,jpegartifacts,signature, watermark, username,blurry,bad feet,cropped,poorly drawn hands,poorly drawn face,mutation,deformed,worst quality,low quality,normal quality,jpeg artifacts,signature,watermark,extra fingers,fewer digits,extra limbs,extra arms,extra legs,malformed limbs,fused fingers,too many fingers,long neck,cross-eyed,mutated hands,polar lowres,bad body,bad proportions,gross proportions,text,error,missing fingers,missing arms,missing legs,extra digit, extra arms, extra leg, extra foot,(freckles),(mole:2)", - 5, - ], - [ - "best quality, ultra high res, (photorealistic:1.4), (detailed beautiful girl:1.4), (medium breasts:0.8), looking_at_viewer, Detailed facial details, beautiful detailed eyes, (multicolored|blue|pink hair: 1.2), green eyes, slender, haunting smile, (makeup:0.3), red lips, , highly detailed clothes, (ulzzang-6500-v1.1:0.3)", - "EasyNegative, paintings, sketches, ugly, 3d, (worst quality:2), (low quality:2), (normal quality:2), lowres, normal quality, ((monochrome)), ((grayscale)), skin spots, acnes, skin blemishes, age spot, manboobs, backlight,(ugly:1.3), (duplicate:1.3), (morbid:1.2), (mutilated:1.2), (tranny:1.3), mutated hands, (poorly drawn hands:1.3), blurry, (bad anatomy:1.2), (bad proportions:1.3), extra limbs, (disfigured:1.3), (more than 2 nipples:1.3), (more than 1 navel:1.3), (missing arms:1.3), (extra legs:1.3), (fused fingers:1.6), (too many fingers:1.6), (unclear eyes:1.3), bad hands, missing fingers, extra digit, (futa:1.1), bad body, double navel, mutad arms, hused arms, (puffy nipples, dark areolae, dark nipples, rei no himo, inverted nipples, long nipples), NG_DeepNegative_V1_75t, pubic hair, fat rolls, obese, bad-picture-chill-75v", - 8, - ], - [ - "best quality, ultra high res, (photorealistic:1.4), (detailed beautiful girl:1.4), (medium breasts:0.8), looking_at_viewer, Detailed facial details, beautiful detailed eyes, (blue|pink hair), green eyes, slender, smile, (makeup:0.4), red lips, (full body, sitting, beach), , highly detailed clothes, (ulzzang-6500-v1.1:0.3)", - "asyNegative, paintings, sketches, ugly, 3d, (worst quality:2), (low quality:2), (normal quality:2), lowres, normal quality, ((monochrome)), ((grayscale)), skin spots, acnes, skin blemishes, age spot, manboobs, backlight,(ugly:1.3), (duplicate:1.3), (morbid:1.2), (mutilated:1.2), (tranny:1.3), mutated hands, (poorly drawn hands:1.3), blurry, (bad anatomy:1.2), (bad proportions:1.3), extra limbs, (disfigured:1.3), (more than 2 nipples:1.3), (more than 1 navel:1.3), (missing arms:1.3), (extra legs:1.3), (fused fingers:1.6), (too many fingers:1.6), (unclear eyes:1.3), bad hands, missing fingers, extra digit, (futa:1.1), bad body, double navel, mutad arms, hused arms, (puffy nipples, dark areolae, dark nipples, rei no himo, inverted nipples, long nipples), NG_DeepNegative_V1_75t, pubic hair, fat rolls, obese, bad-picture-chill-75v", - 7, - ], - [ - "mix4, whole body shot, ((8k, RAW photo, highest quality, masterpiece), High detail RAW color photo professional close-up photo, shy expression, cute, beautiful detailed girl, detailed fingers, 
extremely detailed eyes and face, beautiful detailed nose, beautiful detailed eyes, long eyelashes, light on face, looking at viewer, (closed mouth:1.2), 1girl, cute, young, mature face, (full body:1.3), ((small breasts)), realistic face, realistic body, beautiful detailed thigh,s, same eyes color, (realistic, photo realism:1. 37), (highest quality), (best shadow), (best illustration), ultra high resolution, physics-based rendering, cinematic lighting), solo, 1girl, highly detailed, in office, detailed office, open cardigan, ponytail contorted, beautiful eyes ,sitting in office,dating, business suit, cross-laced clothes, collared shirt, beautiful breast, small breast, Chinese dress, white pantyhose, natural breasts, pink and white hair, ", - "paintings, sketches, (worst quality:2), (low quality:2), (normal quality:2), cloth, underwear, bra, low-res, normal quality, ((monochrome)), ((grayscale)), skin spots, acne, skin blemishes, age spots, glans, bad nipples, long nipples, bad vagina, extra fingers,fewer fingers,strange fingers,bad hand, ng_deepnegative_v1_75t, bad-picture-chill-75v", - 7, - ], - ] - INFO = f""" - ## Generate Your Beauty powered by EditAnything https://github.com/sail-sg/EditAnything - This model is good at generating beautiful female. - """ - WARNING_INFO = f"""### [NOTE] the model is collected from the Internet for demo only, please do not use it for commercial purposes. - We are not responsible for possible risks using this model. - Lora model from https://civitai.com/models/14171/cutegirlmix4 Thanks! - """ - demo = create_demo_template( - process, - process_image_click, - examples=examples, - INFO=INFO, - WARNING_INFO=WARNING_INFO, - ) - return demo - - -if __name__ == "__main__": - # sd_models_path = snapshot_download("shgao/sdmodels") - # lora_model_path = hf_hub_download( - # "mlida/Cute_girl_mix4", "cuteGirlMix4_v10.safetensors") - # model = EditAnythingLoraModel(base_model_path="andite/anything-v4.0", - # lora_model_path=None, use_blip=True, extra_inpaint=True, - # lora_weight=0.5, - # ) - sd_models_path = snapshot_download("shgao/sdmodels") - lora_model_path = hf_hub_download( - "mlida/Cute_girl_mix4", "cuteGirlMix4_v10.safetensors" - ) - model = EditAnythingLoraModel( - base_model_path=os.path.join( - sd_models_path, "chilloutmix_NiPrunedFp32Fix"), - lora_model_path=lora_model_path, - use_blip=True, - extra_inpaint=True, - lora_weight=0.5, - ) - demo = create_demo(model.process, model.process_image_click) - demo.queue().launch(server_name="0.0.0.0") diff --git a/spaces/shivammehta25/Diff-TTSG/diff_ttsg/hifigan/denoiser.py b/spaces/shivammehta25/Diff-TTSG/diff_ttsg/hifigan/denoiser.py deleted file mode 100644 index 23ce638b4348bc13166240e0b7ebecd196810443..0000000000000000000000000000000000000000 --- a/spaces/shivammehta25/Diff-TTSG/diff_ttsg/hifigan/denoiser.py +++ /dev/null @@ -1,64 +0,0 @@ -### Code modified from Rafael Valle's implementation https://github.com/NVIDIA/waveglow/blob/5bc2a53e20b3b533362f974cfa1ea0267ae1c2b1/denoiser.py - -"""Waveglow style denoiser can be used to remove the artifacts from the HiFiGAN generated audio.""" -import torch - - -class Denoiser(torch.nn.Module): - """Removes model bias from audio produced with waveglow""" - - def __init__(self, vocoder, filter_length=1024, n_overlap=4, win_length=1024, mode="zeros"): - super().__init__() - self.filter_length = filter_length - self.hop_length = int(filter_length / n_overlap) - self.win_length = win_length - - dtype, device = next(vocoder.parameters()).dtype, 
next(vocoder.parameters()).device - self.device = device - if mode == "zeros": - mel_input = torch.zeros((1, 80, 88), dtype=dtype, device=device) - elif mode == "normal": - mel_input = torch.randn((1, 80, 88), dtype=dtype, device=device) - else: - raise Exception(f"Mode {mode} if not supported") - - def stft_fn(audio, n_fft, hop_length, win_length, window): - spec = torch.stft( - audio, - n_fft=n_fft, - hop_length=hop_length, - win_length=win_length, - window=window, - return_complex=True, - ) - spec = torch.view_as_real(spec) - return torch.sqrt(spec.pow(2).sum(-1)), torch.atan2(spec[..., -1], spec[..., 0]) - - self.stft = lambda x : stft_fn( - audio=x, - n_fft=self.filter_length, - hop_length=self.hop_length, - win_length=self.win_length, - window=torch.hann_window(self.win_length, device=device) - ) - self.istft = lambda x, y: torch.istft( - torch.complex(x * torch.cos(y), x * torch.sin(y)), - n_fft=self.filter_length, - hop_length=self.hop_length, - win_length=self.win_length, - window=torch.hann_window(self.win_length, device=device), - ) - - with torch.no_grad(): - bias_audio = vocoder(mel_input).float().squeeze(0) - bias_spec, _ = self.stft(bias_audio) - - self.register_buffer("bias_spec", bias_spec[:, :, 0][:, :, None]) - - @torch.inference_mode() - def forward(self, audio, strength=0.0005): - audio_spec, audio_angles = self.stft(audio) - audio_spec_denoised = audio_spec - self.bias_spec.to(audio.device) * strength - audio_spec_denoised = torch.clamp(audio_spec_denoised, 0.0) - audio_denoised = self.istft(audio_spec_denoised, audio_angles) - return audio_denoised \ No newline at end of file diff --git a/spaces/simonduerr/rosettafold2/README.md b/spaces/simonduerr/rosettafold2/README.md deleted file mode 100644 index bc715977ae58cd2f78f6ff17afc3260e312175bf..0000000000000000000000000000000000000000 --- a/spaces/simonduerr/rosettafold2/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: RoseTTAfold2 -emoji: 🏢 -colorFrom: indigo -colorTo: purple -sdk: gradio -sdk_version: 3.33.1 -app_file: app.py -pinned: false -license: mit ---- - - diff --git a/spaces/simonguest/cs-tutor/levels/3/instructions.md b/spaces/simonguest/cs-tutor/levels/3/instructions.md deleted file mode 100644 index 42e9c7ee6cea2b4b59bf1ccc8bdce580cc89fde7..0000000000000000000000000000000000000000 --- a/spaces/simonguest/cs-tutor/levels/3/instructions.md +++ /dev/null @@ -1,20 +0,0 @@ -# Level 3: Cakes, Cookies, and Pies in 2D Arrays - -The owner of the Project Mercury Pastries Food Truck wants to find the total inventory for each dessert. They have the following values: - - ``` - 25 17 22 - 18 12 15 - 21 19 27 - 30 10 23 - ``` - - Each row represents a unique food truck in the business. Each column represents the number of cakes, cookies, and pies, respectively. 
- - Write a method that totals each column and displays the total for each category: - - ``` - Cakes: 94 - Cookies: 58 - Pies: 87 - ``` \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Art Of Conquest MOD APK Unlimited Linari and Other Amazing Benefits.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Art Of Conquest MOD APK Unlimited Linari and Other Amazing Benefits.md deleted file mode 100644 index fcd34f5ead3d7455299159ed8eec8efb0c98b780..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Art Of Conquest MOD APK Unlimited Linari and Other Amazing Benefits.md +++ /dev/null @@ -1,107 +0,0 @@ -
        -

        Art of Conquest Mod APK (Unlimited Linari) - A Review

        -

        Are you a fan of strategy games that combine real-time battles, exploration, and base building? If so, you might have heard of Art of Conquest, a mobile game that lets you command hundreds of troops, summon dozens of heroes, and choose from five races to conquer a vast and magical world. But what if you want to enjoy the game without spending real money or waiting for resources? That's where Art of Conquest Mod APK comes in. This is a modified version of the game that gives you unlimited linari, the premium currency that can be used to buy items, heroes, and resources. In this article, we will review what Art of Conquest is, what Art of Conquest Mod APK is, what the benefits and drawbacks of using it are, and whether it is worth trying.

        -

        What is Art of Conquest?

        -

        Art of Conquest is a mobile strategy game that was released in 2017 by Lilith Games. It features:

        -

        art of conquest mod apk (unlimited linari)


        Download Filehttps://ssurll.com/2uNWDD



        -
          -
        • A mobile strategy game with real-time battles and exploration

          -

          You can challenge your friends or other players around the world to thrilling real-time duels. You can also control your heroes and move freely across the land, picking up resources and gear, as well as battling against all sorts of monsters and fearsome bosses in the wilds.

        • -
        • Five playable races with different abilities and units

          -

          You can choose from five mighty races to raise your army: Humans, Dwarves, Lich, Rakan, or Sylvani. Each race has its own strengths and weaknesses, as well as unique units and heroes. For example, Humans excel at archery and cavalry, Dwarves at engineering and explosives, Lich at magic and undead units, Rakan at stealth and agility, and Sylvani at nature and healing.

        • -
        • A huge and magical world to discover and conquer

          -

          You can explore Nore, Drake, and Sait freely. The diverse terrains, mystical creatures, and interesting events will satisfy your curiosity about this magical world. You can also build your stronghold and besiege the enemy. Six embittered kingdoms strive to survive in this war-torn land, and you can join one of them or create your own. You can also form alliances with other players and cooperate to defeat powerful enemies or expand your territory.

        • -
        -

        What is Art of Conquest Mod APK?

        -

        Art of Conquest Mod APK is a modified version of the game that gives you unlimited linari, the premium currency that can be used to buy items, heroes, and resources. You can download it from various websites that offer modded games, such as this one. However, you should be careful about the source and the compatibility of the mod, as some of them may not work properly or may contain viruses or malware. To install Art of Conquest Mod APK, you need to:

        -
          -
        1. Download the mod file from a trusted website

          -

          You can search for Art of Conquest Mod APK on Google or use the link provided above. Make sure you download the latest version of the mod that matches the version of the game and your device.

          -

          art of conquest hack mod apk unlimited linari and gold
          -art of conquest mod apk latest version with unlimited linari
          -art of conquest mod apk download for android free unlimited linari
          -art of conquest mod apk offline play mode unlimited linari
          -art of conquest mod apk real-time PVP battle unlimited linari
          -art of conquest mod apk unlimited linari and resources
          -art of conquest mod apk unlimited linari and heroes
          -art of conquest mod apk unlimited linari and troops
          -art of conquest mod apk unlimited linari and dragon
          -art of conquest mod apk unlimited linari and gems
          -art of conquest mod apk unlimited linari and stamina
          -art of conquest mod apk unlimited linari and magic
          -art of conquest mod apk unlimited linari and honor
          -art of conquest mod apk unlimited linari and runes
          -art of conquest mod apk unlimited linari and chests
          -art of conquest mod apk unlimited linari and skins
          -art of conquest mod apk unlimited linari and rewards
          -art of conquest mod apk unlimited linari and events
          -art of conquest mod apk unlimited linari and quests
          -art of conquest mod apk unlimited linari and achievements
          -art of conquest mod apk no root required unlimited linari
          -art of conquest mod apk anti ban protection unlimited linari
          -art of conquest mod apk easy installation unlimited linari
          -art of conquest mod apk 100% working unlimited linari
          -art of conquest mod apk safe and secure unlimited linari
          -how to get art of conquest mod apk with unlimited linari
          -how to install art of conquest mod apk with unlimited linari
          -how to use art of conquest mod apk with unlimited linari
          -how to update art of conquest mod apk with unlimited linari
          -how to uninstall art of conquest mod apk with unlimited linari
          -why download art of conquest mod apk with unlimited linari
          -what is the best site to download art of conquest mod apk with unlimited linari
          -what are the features of art of conquest mod apk with unlimited linari
          -what are the benefits of using art of conquest mod apk with unlimited linari
          -what are the drawbacks of using art of conquest mod apk with unlimited linari
          -what are the tips and tricks for playing art of conquest mod apk with unlimited linari
          -what are the cheats and hacks for art of conquest mod apk with unlimited linari
          -what are the reviews and ratings for art of conquest mod apk with unlimited linari
          -what are the alternatives to art of conquest mod apk with unlimited linari
          -what are the similar games to art of conquest mod apk with unlimited linari

        2. -
        3. Enable unknown sources on your device

          -

          You need to allow your device to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on.

        4. -
        5. Install the mod file on your device

          -

          You need to locate the downloaded mod file on your device and tap on it to start the installation process. Follow the instructions on the screen and wait for the installation to finish.

        6. -
        7. Launch the game and enjoy unlimited linari

          -

          You can now open the game and see that you have unlimited linari in your account. You can use them to buy anything you want in the game, such as items, heroes, and resources.

        8. -
        -

        What are the benefits of using Art of Conquest Mod APK?

        -

        Using Art of Conquest Mod APK can give you some advantages over other players who play the original game. Some of the benefits are:

        -
          -
        • You can enjoy the game without spending real money

          -

          Linari is the premium currency that can be bought with real money in the game. It can be used to buy items, heroes, and resources that can enhance your gameplay and give you an edge over your enemies. However, not everyone can afford to spend real money on a mobile game. With Art of Conquest Mod APK, you don't have to worry about that, as you can get unlimited linari for free.

        • -
        • You can unlock all the heroes and upgrade them faster

          -

          Heroes are powerful units that can lead your army and use special skills in battle. There are over 30 heroes in the game, each with their own personality, backstory, and abilities. However, some of them are locked behind paywalls or require a lot of time and resources to unlock and upgrade. With Art of Conquest Mod APK, you can unlock all the heroes instantly and level them up faster with unlimited linari.

        • -
        • You can build your army and stronghold without waiting for resources

          -

          Resources are essential for building your army and stronghold in the game. You need resources such as wood, gold, mithril, blood diamonds, and crystals to recruit units, construct buildings, research technologies, and craft items. However, resources are limited and take time to gather or produce. With Art of Conquest Mod APK, you can get unlimited resources and build your army and stronghold without any delay.

        • -
        -

        What are the drawbacks of using Art of Conquest Mod APK?

        -

        Using Art of Conquest Mod APK is not without risks or consequences. Some of the drawbacks are:

        -
          -
        • It may not be compatible with the latest version of the game or your device

          -

          The mod may not work properly or at all if it is outdated or incompatible with the latest version of the game or your device. You may experience errors or crashes in the game or lose your progress or data. You may also miss out on new features or updates that are added to the original game.

        • -
        • It may cause errors or crashes in the game

          -

          The mod may interfere with the normal functioning of the game or cause glitches or bugs that affect your gameplay. For example, some players have reported that using the mod causes their heroes to disappear or their resources to reset. You may also encounter problems with connecting to the server or loading the game.

        • -
        • It may get you banned from the game or your account suspended

          -


          The mod may violate the terms of service or the rules of the game, which prohibit the use of any cheats, hacks, or mods that give you an unfair advantage over other players. The developers or the moderators may detect your use of the mod and ban you from the game or suspend your account. You may lose access to your account, your progress, your items, and your friends. You may also face legal action or penalties for breaking the law or infringing the intellectual property rights of the developers.

        • -
        -

        Is Art of Conquest Mod APK worth trying?

        -

        The answer to this question depends on your preference and play style. Some players may find Art of Conquest Mod APK appealing and fun, as it allows them to enjoy the game without limitations or restrictions. They may like the challenge of using the mod and avoiding detection or ban. They may also want to experiment with different heroes, units, and strategies that are otherwise unavailable or hard to obtain in the original game.

        -

        Other players may find Art of Conquest Mod APK unfair and boring, as it takes away the thrill and satisfaction of playing the game legitimately. They may prefer to play fair and support the developers who created the game. They may also value their account security and integrity more than their in-game wealth and power. They may want to experience the game as it is intended and designed by the developers.

        -

        Ultimately, it is up to you to decide whether you want to try Art of Conquest Mod APK or not. You should weigh the pros and cons carefully and be aware of the risks and consequences involved. You should also respect the rights and choices of other players who may have a different opinion or preference than you.

        -

        Conclusion

        -

        Art of Conquest is a mobile strategy game that combines real-time battles, exploration, and base building in a huge and magical world. Art of Conquest Mod APK is a modified version of the game that gives you unlimited linari, the premium currency that can be used to buy items, heroes, and resources. Using Art of Conquest Mod APK can have some benefits, such as enjoying the game without spending real money, unlocking all the heroes and upgrading them faster, and building your army and stronghold without waiting for resources. However, using Art of Conquest Mod APK can also have some drawbacks, such as not being compatible with the latest version of the game or your device, causing errors or crashes in the game, and getting you banned from the game or your account suspended. Whether Art of Conquest Mod APK is worth trying or not depends on your preference and play style. You should consider the advantages and disadvantages carefully and be responsible for your actions.

        -

        FAQs

        -
          -
        1. What is linari in Art of Conquest?

          -

          Linari is the premium currency in Art of Conquest that can be used to buy items, heroes, and resources. You can get linari by completing quests, achievements, events, or buying them with real money.

        2. -
        3. How many races are there in Art of Conquest?

          -

          There are five races in Art of Conquest: Humans, Dwarves, Lich, Rakan, and Sylvani. Each race has its own strengths and weaknesses, as well as unique units and heroes.

        4. -
        5. How many heroes are there in Art of Conquest?

          -

          There are over 30 heroes in Art of Conquest, each with their own personality, backstory, and abilities. You can unlock them by completing quests, events, or buying them with linari.

        6. -
        7. How do I update Art of Conquest Mod APK?

          -

          You need to download the latest version of Art of Conquest Mod APK from a trusted website that matches the version of the game and your device. You also need to uninstall the previous version of Art of Conquest Mod APK before installing the new one.

        8. -
        9. How do I avoid getting banned from using Art of Conquest Mod APK?

          -

          You need to be careful about using Art of Conquest Mod APK, as it may violate the terms of service or the rules of the game. You should not use it excessively or blatantly in front of other players who may report you. You should also back up your data regularly in case you lose your account or progress.

        10. -

        401be4b1e0
        -
        -
        \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Battle Cats MOD APK and Enjoy the Ultimate Cat Strategy Game.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Battle Cats MOD APK and Enjoy the Ultimate Cat Strategy Game.md deleted file mode 100644 index 77dc34f5af4ba0a8ab4888d13d34885a8bcfbd34..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Battle Cats MOD APK and Enjoy the Ultimate Cat Strategy Game.md +++ /dev/null @@ -1,148 +0,0 @@ -
        -

        Battle Cats Mod APK: A Guide for Cat Lovers

        -

        If you are a fan of cats and tower defense games, you might have heard of Battle Cats, a popular mobile game that lets you command an army of cute but deadly felines. But did you know that there is a way to enjoy this game without spending a dime or waiting for hours? Yes, we are talking about Battle Cats Mod APK, a modified version of the original game that gives you unlimited resources and access to all the features. In this article, we will tell you everything you need to know about Battle Cats Mod APK, including what it is, how to download and install it, how to play it, its features, pros and cons, and some tips and tricks. Read on to find out more.

        -

        battle cats mod apk


        Download File: https://ssurll.com/2uNVPQ



        -

        What is Battle Cats?

        -

        Before we dive into the details of Battle Cats Mod APK, let us first introduce you to the original game. Battle Cats is a casual tower defense game developed by PONOS Corporation for Android and iOS devices. The game was released in 2012 in Japan, and later in 2014 worldwide. The game has over 10 million downloads on Google Play Store, and has received positive reviews from critics and players alike.

        -

        A casual tower defense game with cute cats

        -

        The gameplay of Battle Cats is simple and straightforward. You have to protect your base from the attacks of various enemies, such as dogs, snakes, stick figures, aliens, zombies, etc. To do this, you have to deploy your own cats on the battlefield, who will automatically march towards the enemy base and fight anything in their way. You can also use a cat cannon and special items to support your cats. The game has a cartoonish and humorous style, with colorful graphics and funny sound effects.

        -

        A gacha game with hundreds of cat units to collect and upgrade

        -

          One of the main attractions of Battle Cats is the variety of cat units that you can collect and upgrade. There are hundreds of cats in the game, each with their own appearance, stats, abilities, and rarity. You can obtain new cats by using cat capsules or gacha, a mechanic that randomly gives you a cat for a certain amount of cat food, the in-game currency. You can also upgrade your cats by using XP, which you can earn by completing stages or exchanging cat food. Upgrading your cats will increase their level, stats, and sometimes unlock new forms or abilities. Some cats can also evolve into different forms, such as super rare or uber rare, which have more powerful and unique skills.

        -

        A game with various game modes, stages, and enemies

        -

        Battle Cats also offers a lot of content and challenges for you to enjoy. The game has various game modes, such as the main story mode, the legend mode, the challenge mode, the catclaw dojo, the zombie outbreak, the heavenly tower, etc. Each game mode has different objectives, rules, and rewards. The game also has hundreds of stages to play, each with different backgrounds, enemies, and difficulties. Some stages also have special conditions or restrictions, such as limited cat slots, time limit, or enemy buffs. The game also features a wide range of enemies to fight against, each with their own appearance, stats, abilities, and weaknesses. Some enemies are based on real-life animals or historical figures, while others are more fantastical or absurd.

        -

        What is Battle Cats Mod APK?

        -

        Now that you have a basic idea of what Battle Cats is, let us move on to the main topic of this article: Battle Cats Mod APK. As the name suggests, Battle Cats Mod APK is a modified version of the original game that offers some advantages and benefits that are not available in the official version. However, it also comes with some risks and drawbacks that you should be aware of before downloading and installing it.

        -

        battle cats mod apk unlimited money and cat food
        -battle cats mod apk latest version download
        -battle cats mod apk android 1
        -battle cats mod apk ios
        -battle cats mod apk 2023
        -battle cats mod apk no root
        -battle cats mod apk offline
        -battle cats mod apk all cats unlocked
        -battle cats mod apk god mode
        -battle cats mod apk unlimited xp
        -battle cats mod apk free shopping
        -battle cats mod apk hack
        -battle cats mod apk revdl
        -battle cats mod apk an1
        -battle cats mod apk happymod
        -battle cats mod apk unlimited cat capsules
        -battle cats mod apk online
        -battle cats mod apk unlimited everything
        -battle cats mod apk unlimited rare tickets
        -battle cats mod apk anti ban
        -battle cats mod apk unlimited cat eyes
        -battle cats mod apk max level
        -battle cats mod apk unlimited platinum tickets
        -battle cats mod apk 12.3.0
        -battle cats mod apk 12.2.0
        -battle cats mod apk 12.1.0
        -battle cats mod apk 12.0.0
        -battle cats mod apk 11.9.0
        -battle cats mod apk 11.8.0
        -battle cats mod apk 11.7.0
        -battle cats mod apk 11.6.0
        -battle cats mod apk 11.5.0
        -battle cats mod apk 11.4.0
        -battle cats mod apk 11.3.0
        -battle cats mod apk 11.2.0
        -battle cats mod apk 11.1.0
        -battle cats mod apk 11.0.0
        -the battle cats hack/mod (unlimited money) v12.3.0 for android & ios download link[^1^]
        -the battle cats ultimate hack/mod (unlimited money) v12.3.0 for android & ios download link[^1^]
        -the battle cats infinite hack/mod (unlimited money) v12.3.0 for android & ios download link[^1^]
        -the battle cats super hack/mod (unlimited money) v12.3.0 for android & ios download link[^1^]
        -the battle cats mega hack/mod (unlimited money) v12.3.0 for android & ios download link[^1^]
        -the battle cats extreme hack/mod (unlimited money) v12.3.0 for android & ios download link[^1^]
        -the battle cats ultimate cheat/mod (unlimited money) v12.3.0 for android & ios download link[^1^]
        -the battle cats infinite cheat/mod (unlimited money) v12.3.0 for android & ios download link[^1^]
        -the battle cats super cheat/mod (unlimited money) v12.3.0 for android & ios download link[^1^]
        -the battle cats mega cheat/mod (unlimited money) v12.3.0 for android & ios download link[^1^]
        -the battle cats extreme cheat/mod (unlimited money) v12.3.0 for android & ios download link[^1^]

        -

        A modified version of the original game that offers unlimited resources

        -

        The main reason some players choose to use Battle Cats Mod APK is that it gives them unlimited resources that they can use to play the game without any limitations or restrictions. For example, Battle Cats Mod APK gives you unlimited XP and cat food, which you can use to upgrade your cats, obtain new cats, or buy special items. It also gives you access to all the cats, treasures, and stages that are normally locked or require a certain amount of cat food or XP to unlock. This way, you can enjoy the game without having to grind or spend real money.

        -

        A way to unlock all the cats, treasures, and stages without spending money

        -

        Another reason some players prefer Battle Cats Mod APK is that it allows them to unlock all the content and features that the game has to offer without spending any money. As you may know, Battle Cats is a free-to-play game that relies on microtransactions and in-app purchases to generate revenue. This means that some of the best cats, treasures, and stages are either very rare or very expensive to obtain. For example, some uber rare cats can cost up to 1500 cat food per roll, which is equivalent to about $50 in real money. Some stages also require a lot of cat food or XP to clear or unlock. By using Battle Cats Mod APK, you can bypass these costs and get everything for free.

        -

        A risk of getting banned or infected by malware

        -

        However, using Battle Cats Mod APK is not without risks and consequences. First of all, using Battle Cats Mod APK is considered cheating and violating the terms of service of the game. This means that if you are caught using it by the developers or other players, you may face some penalties or sanctions, such as losing your account progress, getting banned from the game, or even facing legal action. Therefore, you should use Battle Cats Mod APK at your own risk and discretion, and be careful not to expose yourself to other players or the developers.

        -

        Secondly, using Battle Cats Mod APK may also expose your device and personal information to malware or viruses. Since Battle Cats Mod APK is not an official version of the game, it is not verified or tested by the developers or any reputable source. This means that some Battle Cats Mod APK files may contain harmful or malicious code that can harm your device or steal your data. Therefore, you should only download and install Battle Cats Mod APK from reliable and trustworthy sources, and scan the file with an antivirus software before opening it.

        -

        How to Download and Install Battle Cats Mod APK?

        -

        If you have decided to use Battle Cats Mod APK, you may wonder how to download and install it on your device. Here are the steps that you need to follow:

        -

        Find a reliable source that offers the latest version of the mod

        -

        The first step is to find a website or a platform that provides Battle Cats Mod APK files for download. You can search for them on Google or any other search engine, but make sure to check the reviews, ratings, and comments of other users before choosing one. You should also look for the latest version of the mod, as older versions may not work properly or be compatible with the official game updates.

        -

        Enable unknown sources on your device settings

        -

        The next step is to enable unknown sources on your device settings. This will allow you to install apps that are not from the Google Play Store or the App Store. To do this, go to your device settings, then security, then unknown sources, and toggle it on. You may see a warning message that says installing apps from unknown sources may harm your device, but you can ignore it if you trust the source of the mod.

        -

        Download and install the mod APK file

        -

        The final step is to download and install the mod APK file on your device. To do this, go to the website or platform that you have chosen, and look for the download link or button. Tap on it and wait for the file to be downloaded. Once it is done, go to your file manager and locate the file. Tap on it and follow the instructions to install it. You may see a pop-up message that asks for your permission to install the app, just tap on yes or allow. After the installation is complete, you can open the app and enjoy Battle Cats Mod APK.

        -

        How to Play Battle Cats Mod APK?

        -

        Playing Battle Cats Mod APK is not much different from playing the original game. The gameplay mechanics and controls are the same, except that you have unlimited resources and access to all the features. Here are some basic tips on how to play Battle Cats Mod APK:

        -

        Choose your cat army and deploy them on the battlefield

        -

        The first thing you need to do is to choose your cat army from the hundreds of cat units that you have unlocked and upgraded. You can select up to 10 cats per battle, and arrange them in different slots according to their type, cost, and ability. You can also change your cat lineup before each stage depending on the enemy composition and difficulty. To deploy your cats on the battlefield, you need to tap on their icons at the bottom of the screen when you have enough money. The money will automatically increase over time, but you can also speed it up by tapping on it.

        -

        Use the cat cannon and special items to support your cats

        -

        Besides deploying your cats, you can also use other tools to help them win the battle. One of them is the cat cannon, which is a powerful weapon that can damage all the enemies on the screen. You can use the cat cannon by tapping on the button at the top right corner of the screen when it is fully charged. The charge time will depend on the level of your cat base and cat cannon, which you can upgrade with XP. Another tool is the special items, which are consumables that can give you various effects, such as increasing your money, freezing the enemies, or boosting your cats. You can use the special items by tapping on their icons at the top left corner of the screen when you have them. You can get more special items by exchanging cat food or completing certain stages.

        -

        Defeat the enemy base and collect rewards

        -

        The goal of each battle is to defeat the enemy base before they destroy yours. The enemy base is located at the right end of the screen, while your base is at the left end. You can see the health bars of both bases at the top of the screen. To defeat the enemy base, you need to send your cats to attack it and reduce its health to zero. However, you also need to defend your own base from the enemy attacks and prevent them from reaching it. If your base health reaches zero, you will lose the battle and have to retry. If you win the battle, you will collect rewards, such as XP, cat food, cat capsules, or treasures. Treasures are items that can improve your cats' performance, such as increasing their attack, defense, speed, or money production. You can obtain different treasures by clearing different stages.

        -

        What are the Features of Battle Cats Mod APK?

        -

        As we have mentioned earlier, Battle Cats Mod APK offers some features that are not available in the original game. Here are some of them:

        -

        Unlimited XP and cat food

        -

        One of the most obvious features of Battle Cats Mod APK is that it gives you unlimited XP and cat food, which are the main resources in the game. You can use XP to upgrade your cats, cat base, and cat cannon, which will make them stronger and more effective in battle. You can use cat food to obtain new cats, special items, or extra energy, which will allow you to play more stages and modes. With unlimited XP and cat food, you don't have to worry about running out of them or spending real money to get more.

        -

        All cats unlocked and upgraded

        -

        Another feature of Battle Cats Mod APK is that it gives you access to all the cats in the game, regardless of their rarity or availability. You don't have to use cat capsules or gacha to get new cats, as they are already unlocked and ready to use. Moreover, all the cats are already upgraded to their maximum level and form, which means that they have the best stats and abilities possible. You can choose from hundreds of cats and create your own cat army according to your preference and strategy.

        -

        All treasures obtained and stages cleared

        -

        A third feature of Battle Cats Mod APK is that it gives you all the treasures in the game, which are items that can boost your cats' performance in various ways. You don't have to clear certain stages or get lucky to get them, as they are already obtained and activated for you. Furthermore, all the stages in the game are already cleared for you, which means that you don't have to play them or meet their requirements to unlock them. You can access any stage or mode that you want without any restriction or limitation.

        -

        What are the Pros and Cons of Battle Cats Mod APK?

        -

        Like any other modded app or game, Battle Cats Mod APK has its own pros and cons that you should consider before using it. Here are some of them:

        -

        Pros:

        -
          -
        • Easy and fun gameplay
          -

          Battle Cats Mod APK makes the gameplay easier and more fun for you by giving you unlimited resources and access to all the features. You don't have to worry about running out of XP, cat food, energy, or anything else that can limit your progress or enjoyment. You can also experiment with different cat combinations and strategies without any risk or cost. You can play the game at your own pace and style, and have fun with the cute and hilarious cats.

        • -
        • Variety of cats and enemies
          -

          Battle Cats Mod APK also gives you the opportunity to experience the variety of cats and enemies that the game has to offer. You can unlock and use all the cats in the game, from the basic cats to the uber rare cats, and see their unique appearance, stats, abilities, and animations. You can also face all the enemies in the game, from the simple dogs to the epic bosses, and see their diverse design, behavior, and challenge. You can enjoy the creativity and humor of the game developers and appreciate the diversity of the game content.

        • -
        • No need to spend real money
          -

          Another advantage of Battle Cats Mod APK is that it saves you from spending real money on the game. As we have mentioned earlier, Battle Cats is a free-to-play game that relies on microtransactions and in-app purchases to generate revenue. This means that some of the best features and content are either very rare or very expensive to obtain. For example, some uber rare cats can cost up to $50 per roll, which is a lot of money for a mobile game. By using Battle Cats Mod APK, you can get everything for free and avoid wasting your money on something that you may not even get.

        • -
        -

        Cons:

        -
          -
        • Loss of challenge and satisfaction
          -

          One of the drawbacks of Battle Cats Mod APK is that it takes away the challenge and satisfaction that come with playing the original game. By having unlimited resources and access to all the features, you don't have to work hard or overcome any obstacles to achieve your goals or complete your tasks. You don't have to strategize or optimize your cat lineup or gameplay to win the battles or clear the stages. You don't have to earn or save your XP, cat food, energy, or other resources to upgrade your cats or unlock new content. You don't have to feel the thrill of getting a rare cat or a treasure after a long and hard effort. You don't have to feel the pride of completing a difficult stage or mode after many tries and failures. In short, you don't have to feel anything at all, except boredom and emptiness.

        • -
        • Potential security issues and legal consequences
          -

          Another disadvantage of Battle Cats Mod APK is that it exposes you to potential security issues and legal consequences. As we have mentioned earlier, using Battle Cats Mod APK is considered cheating and violating the terms of service of the game. This means that if you are caught using it by the developers or other players, you may face some penalties or sanctions, such as losing your account progress, getting banned from the game, or even facing legal action. Therefore, you should use Battle Cats Mod APK at your own risk and discretion, and be careful not to expose yourself to other players or the developers.

          -

          Secondly, using Battle Cats Mod APK may also expose your device and personal information to malware or viruses. Since Battle Cats Mod APK is not an official version of the game, it is not verified or tested by the developers or any reputable source. This means that some Battle Cats Mod APK files may contain harmful or malicious code that can harm your device or steal your data. Therefore, you should only download and install Battle Cats Mod APK from reliable and trustworthy sources, and scan the file with an antivirus software before opening it.

        • -
        • Incompatibility with online features and updates
          -

          A third drawback of Battle Cats Mod APK is that it may not be compatible with the online features and updates of the original game. As you may know, Battle Cats is a game that is constantly updated and improved by the developers, who add new cats, stages, modes, events, and features to the game regularly. However, Battle Cats Mod APK may not be able to support these updates and features, as it is based on an older or different version of the game. This means that you may not be able to enjoy the latest content and features that the game has to offer, or even play the game at all if it requires an update. Moreover, Battle Cats Mod APK may also prevent you from accessing the online features of the game, such as the multiplayer mode, the leaderboards, the rankings, the achievements, etc. This means that you may not be able to interact with other players or compete with them in the game.

        • -
        -

        What are Some Tips and Tricks for Battle Cats Mod APK?

        -

        Even though Battle Cats Mod APK makes the game easier and more fun for you, you may still want to know some tips and tricks that can help you play the game better and enjoy it more. Here are some of them:

        -

        Use different cat combinations for different stages and enemies

        -

        One of the most important aspects of Battle Cats is choosing the right cat combination for each stage and enemy. Even though you have access to all the cats in the game, you still need to consider their type, cost, ability, and synergy when selecting them. For example, some cats are more effective against certain enemies or in certain situations than others. Some cats also have special abilities that can help you in battle, such as freezing, knocking back, slowing down, weakening, or critical hitting the enemies. Some cats also work well together and can create powerful combos or effects. Therefore, you should experiment with different cat combinations and find out what works best for you.

        -

        Upgrade your cat base and cat cannon regularly

        -

        Another important aspect of Battle Cats is upgrading your cat base and cat cannon regularly. Your cat base is your main defense against the enemy attacks, as it determines your base health and money production. Your cat cannon is your main offense against the enemy waves, as it can damage all the enemies on the screen with a single shot. Therefore, you should upgrade your cat base and cat cannon with XP as often as possible to make them stronger and more effective in battle.

        -

        Watch ads and complete offers to get more cat food

        -

          Even though you have unlimited cat food in Battle Cats Mod APK, you may still want to watch ads and complete offers to get more cat food. Why? Because watching ads and completing offers can give you other benefits besides cat food, such as rare tickets, special items, energy, or XP. These benefits can help you in the game, especially if you want to play the original version or the online features. Therefore, you should watch ads and complete offers whenever you can to get more rewards and advantages.

        -

        Conclusion

        -

        Battle Cats Mod APK is a modified version of the original game that gives you unlimited resources and access to all the features. It is a fun and easy way to enjoy the game without any limitations or restrictions. However, it also has some risks and drawbacks that you should be aware of before using it, such as losing the challenge and satisfaction, facing security issues and legal consequences, and being incompatible with online features and updates. Therefore, you should use Battle Cats Mod APK at your own risk and discretion, and be careful not to expose yourself to other players or the developers. If you want to play the game the way it was meant to be played, you should stick to the original version and support the game developers.

        -

        FAQs

        -

        Here are some frequently asked questions about Battle Cats Mod APK:

        -
          -
        • Is Battle Cats Mod APK safe to use?
          -

          Battle Cats Mod APK is not an official version of the game, and it is not verified or tested by the developers or any reputable source. This means that some Battle Cats Mod APK files may contain harmful or malicious code that can harm your device or steal your data. Therefore, you should only download and install Battle Cats Mod APK from reliable and trustworthy sources, and scan the file with an antivirus software before opening it.

        • -
        • Is Battle Cats Mod APK legal to use?
          -

          Battle Cats Mod APK is considered cheating and violating the terms of service of the game. This means that if you are caught using it by the developers or other players, you may face some penalties or sanctions, such as losing your account progress, getting banned from the game, or even facing legal action. Therefore, you should use Battle Cats Mod APK at your own risk and discretion, and be careful not to expose yourself to other players or the developers.

        • -
        • Can I play Battle Cats Mod APK online?
          -

          Battle Cats Mod APK may not be compatible with the online features and updates of the original game. This means that you may not be able to enjoy the latest content and features that the game has to offer, or even play the game at all if it requires an update. Moreover, Battle Cats Mod APK may also prevent you from accessing the online features of the game, such as the multiplayer mode, the leaderboards, the rankings, the achievements, etc. This means that you may not be able to interact with other players or compete with them in the game.

        • -
        • Can I switch between Battle Cats Mod APK and the original game?
          -

          You can switch between Battle Cats Mod APK and the original game by uninstalling one version and installing another. However, you should be aware that your account progress and data may not be transferred or synced between the two versions. This means that you may lose your cats, treasures, stages, XP, cat food, etc. when you switch between them. Therefore, you should back up your data before switching between them.

        • -
        • Where can I download Battle Cats Mod APK?
          -

          You can download Battle Cats Mod APK from various websites or platforms that provide modded apps or games for download. You can search for them on Google or any other search engine, but make sure to check the reviews, ratings, and comments of other users before choosing one. You should also look for the latest version of the mod that matches the version of the game and your device.

          There are many websites or platforms that offer Battle Cats Mod APK for download, but not all of them are reliable or trustworthy. Some of them may contain outdated, corrupted, or infected files that can harm your device or data. Therefore, you should be careful and selective when choosing where to download Battle Cats Mod APK. Here are some of the best websites that we recommend for downloading Battle Cats Mod APK:

          -
            -
          • APKdone
            -

            APKdone is a website that provides modded apps and games for Android devices. It has a large and updated collection of mods, including Battle Cats Mod APK. You can download Battle Cats Mod APK from APKdone for free, and enjoy unlimited XP, cat food, and access to all the features. APKdone also has a user-friendly interface, a fast download speed, and a secure connection. You can also read the reviews, ratings, and comments of other users before downloading the mod.

          • -
          • Moddroid
            -

            Moddroid is another website that offers modded apps and games for Android devices. It also has a wide and updated selection of mods, including Battle Cats Mod APK. You can download Battle Cats Mod APK from Moddroid for free, and get unlimited XP, cat food, and access to all the features. Moddroid also has a simple and easy-to-use interface, a high download speed, and a safe connection. You can also check the details, screenshots, and videos of the mod before downloading it.

          • -
          • HappyMod
            -

            HappyMod is a platform that allows you to download and install modded apps and games for Android devices. It has a huge and updated library of mods, including Battle Cats Mod APK. You can download Battle Cats Mod APK from HappyMod for free, and enjoy unlimited XP, cat food, and access to all the features. HappyMod also has a friendly and interactive interface, a fast download speed, and a secure connection. You can also see the ratings, votes, and feedback of other users before downloading the mod.

          • -
          -

          These are some of the best websites that we recommend for downloading Battle Cats Mod APK. However, you should still be careful and cautious when using any modded app or game, as they may have some risks and consequences that we have discussed earlier. You should also respect the game developers and support them by playing the original version of the game.

          401be4b1e0
          -
          -
          \ No newline at end of file diff --git a/spaces/sklearn-docs/Test-with-permutations-the-significance-of-a-classification-score/README.md b/spaces/sklearn-docs/Test-with-permutations-the-significance-of-a-classification-score/README.md deleted file mode 100644 index 75f2b1cbc0d40c68203af7b683fd5921611a398e..0000000000000000000000000000000000000000 --- a/spaces/sklearn-docs/Test-with-permutations-the-significance-of-a-classification-score/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Test With Permutations The Significance Of A Classification Score -emoji: 🐢 -colorFrom: yellow -colorTo: purple -sdk: gradio -sdk_version: 3.32.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/sklkd93/CodeFormer/CodeFormer/basicsr/utils/options.py b/spaces/sklkd93/CodeFormer/CodeFormer/basicsr/utils/options.py deleted file mode 100644 index db490e4aa52e26fde31959fd74c2cef3af2ecf76..0000000000000000000000000000000000000000 --- a/spaces/sklkd93/CodeFormer/CodeFormer/basicsr/utils/options.py +++ /dev/null @@ -1,108 +0,0 @@ -import yaml -import time -from collections import OrderedDict -from os import path as osp -from basicsr.utils.misc import get_time_str - -def ordered_yaml(): - """Support OrderedDict for yaml. - - Returns: - yaml Loader and Dumper. - """ - try: - from yaml import CDumper as Dumper - from yaml import CLoader as Loader - except ImportError: - from yaml import Dumper, Loader - - _mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG - - def dict_representer(dumper, data): - return dumper.represent_dict(data.items()) - - def dict_constructor(loader, node): - return OrderedDict(loader.construct_pairs(node)) - - Dumper.add_representer(OrderedDict, dict_representer) - Loader.add_constructor(_mapping_tag, dict_constructor) - return Loader, Dumper - - -def parse(opt_path, root_path, is_train=True): - """Parse option file. - - Args: - opt_path (str): Option file path. - is_train (str): Indicate whether in training or not. Default: True. - - Returns: - (dict): Options. 
- """ - with open(opt_path, mode='r') as f: - Loader, _ = ordered_yaml() - opt = yaml.load(f, Loader=Loader) - - opt['is_train'] = is_train - - # opt['name'] = f"{get_time_str()}_{opt['name']}" - if opt['path'].get('resume_state', None): # Shangchen added - resume_state_path = opt['path'].get('resume_state') - opt['name'] = resume_state_path.split("/")[-3] - else: - opt['name'] = f"{get_time_str()}_{opt['name']}" - - - # datasets - for phase, dataset in opt['datasets'].items(): - # for several datasets, e.g., test_1, test_2 - phase = phase.split('_')[0] - dataset['phase'] = phase - if 'scale' in opt: - dataset['scale'] = opt['scale'] - if dataset.get('dataroot_gt') is not None: - dataset['dataroot_gt'] = osp.expanduser(dataset['dataroot_gt']) - if dataset.get('dataroot_lq') is not None: - dataset['dataroot_lq'] = osp.expanduser(dataset['dataroot_lq']) - - # paths - for key, val in opt['path'].items(): - if (val is not None) and ('resume_state' in key or 'pretrain_network' in key): - opt['path'][key] = osp.expanduser(val) - - if is_train: - experiments_root = osp.join(root_path, 'experiments', opt['name']) - opt['path']['experiments_root'] = experiments_root - opt['path']['models'] = osp.join(experiments_root, 'models') - opt['path']['training_states'] = osp.join(experiments_root, 'training_states') - opt['path']['log'] = experiments_root - opt['path']['visualization'] = osp.join(experiments_root, 'visualization') - - else: # test - results_root = osp.join(root_path, 'results', opt['name']) - opt['path']['results_root'] = results_root - opt['path']['log'] = results_root - opt['path']['visualization'] = osp.join(results_root, 'visualization') - - return opt - - -def dict2str(opt, indent_level=1): - """dict to string for printing options. - - Args: - opt (dict): Option dict. - indent_level (int): Indent level. Default: 1. - - Return: - (str): Option string for printing. 
- """ - msg = '\n' - for k, v in opt.items(): - if isinstance(v, dict): - msg += ' ' * (indent_level * 2) + k + ':[' - msg += dict2str(v, indent_level + 1) - msg += ' ' * (indent_level * 2) + ']\n' - else: - msg += ' ' * (indent_level * 2) + k + ': ' + str(v) + '\n' - return msg diff --git a/spaces/smith2020/WhatsApp-chat-analysis-summary/README.md b/spaces/smith2020/WhatsApp-chat-analysis-summary/README.md deleted file mode 100644 index ebb8fa198d59766b498ac1dc4cf8b533d34e24a0..0000000000000000000000000000000000000000 --- a/spaces/smith2020/WhatsApp-chat-analysis-summary/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: WhatsApp Chat Analysis Summary -emoji: 😻 -colorFrom: pink -colorTo: purple -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/sparanoid/milky-green-sovits-4/models.py b/spaces/sparanoid/milky-green-sovits-4/models.py deleted file mode 100644 index 13278d680493970f5a670cf3fc955a6e9b7ab1d5..0000000000000000000000000000000000000000 --- a/spaces/sparanoid/milky-green-sovits-4/models.py +++ /dev/null @@ -1,420 +0,0 @@ -import copy -import math -import torch -from torch import nn -from torch.nn import functional as F - -import modules.attentions as attentions -import modules.commons as commons -import modules.modules as modules - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm - -import utils -from modules.commons import init_weights, get_padding -from vdecoder.hifigan.models import Generator -from utils import f0_to_coarse - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class Encoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - # print(x.shape,x_lengths.shape) - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = 
torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class TextEncoder(nn.Module): - def __init__(self, - out_channels, - hidden_channels, - kernel_size, - n_layers, - gin_channels=0, - filter_channels=None, - n_heads=None, - p_dropout=None): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.gin_channels = gin_channels - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - self.f0_emb = nn.Embedding(256, hidden_channels) - - self.enc_ = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - - def forward(self, x, x_mask, f0=None, noice_scale=1): - x = x + self.f0_emb(f0).transpose(1,2) - x = self.enc_(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs) * noice_scale) * x_mask - - return z, m, logs, x_mask - - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2,3,5,7,11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + 
[DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class SpeakerEncoder(torch.nn.Module): - def __init__(self, mel_n_channels=80, model_num_layers=3, model_hidden_size=256, model_embedding_size=256): - super(SpeakerEncoder, self).__init__() - self.lstm = nn.LSTM(mel_n_channels, model_hidden_size, model_num_layers, batch_first=True) - self.linear = nn.Linear(model_hidden_size, model_embedding_size) - self.relu = nn.ReLU() - - def forward(self, mels): - self.lstm.flatten_parameters() - _, (hidden, _) = self.lstm(mels) - embeds_raw = self.relu(self.linear(hidden[-1])) - return embeds_raw / torch.norm(embeds_raw, dim=1, keepdim=True) - - def compute_partial_slices(self, total_frames, partial_frames, partial_hop): - mel_slices = [] - for i in range(0, total_frames-partial_frames, partial_hop): - mel_range = torch.arange(i, i+partial_frames) - mel_slices.append(mel_range) - - return mel_slices - - def embed_utterance(self, mel, partial_frames=128, partial_hop=64): - mel_len = mel.size(1) - last_mel = mel[:,-partial_frames:] - - if mel_len > partial_frames: - mel_slices = self.compute_partial_slices(mel_len, partial_frames, partial_hop) - mels = list(mel[:,s] for s in mel_slices) - mels.append(last_mel) - mels = torch.stack(tuple(mels), 0).squeeze(1) - - with torch.no_grad(): - partial_embeds = self(mels) - embed = torch.mean(partial_embeds, axis=0).unsqueeze(0) - #embed = embed / torch.linalg.norm(embed, 2) - else: - with torch.no_grad(): - embed = self(last_mel) - - return embed - -class F0Decoder(nn.Module): - def __init__(self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - spk_channels=0): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.spk_channels = spk_channels - - self.prenet = nn.Conv1d(hidden_channels, hidden_channels, 3, padding=1) - self.decoder = attentions.FFT( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.f0_prenet = nn.Conv1d(1, hidden_channels , 3, padding=1) - self.cond = nn.Conv1d(spk_channels, hidden_channels, 1) - - def forward(self, x, norm_f0, x_mask, spk_emb=None): - x = torch.detach(x) - if (spk_emb is not None): - x = x + self.cond(spk_emb) - x += self.f0_prenet(norm_f0) - x = self.prenet(x) * x_mask - x = self.decoder(x * x_mask, x_mask) - x = self.proj(x) * x_mask - return x - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - ssl_dim, - n_speakers, - sampling_rate=44100, - **kwargs): - - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = 
inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - self.ssl_dim = ssl_dim - self.emb_g = nn.Embedding(n_speakers, gin_channels) - - self.pre = nn.Conv1d(ssl_dim, hidden_channels, kernel_size=5, padding=2) - - self.enc_p = TextEncoder( - inter_channels, - hidden_channels, - filter_channels=filter_channels, - n_heads=n_heads, - n_layers=n_layers, - kernel_size=kernel_size, - p_dropout=p_dropout - ) - hps = { - "sampling_rate": sampling_rate, - "inter_channels": inter_channels, - "resblock": resblock, - "resblock_kernel_sizes": resblock_kernel_sizes, - "resblock_dilation_sizes": resblock_dilation_sizes, - "upsample_rates": upsample_rates, - "upsample_initial_channel": upsample_initial_channel, - "upsample_kernel_sizes": upsample_kernel_sizes, - "gin_channels": gin_channels, - } - self.dec = Generator(h=hps) - self.enc_q = Encoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - self.f0_decoder = F0Decoder( - 1, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - spk_channels=gin_channels - ) - self.emb_uv = nn.Embedding(2, hidden_channels) - - def forward(self, c, f0, uv, spec, g=None, c_lengths=None, spec_lengths=None): - g = self.emb_g(g).transpose(1,2) - # ssl prenet - x_mask = torch.unsqueeze(commons.sequence_mask(c_lengths, c.size(2)), 1).to(c.dtype) - x = self.pre(c) * x_mask + self.emb_uv(uv.long()).transpose(1,2) - - # f0 predict - lf0 = 2595. * torch.log10(1. + f0.unsqueeze(1) / 700.) / 500 - norm_lf0 = utils.normalize_f0(lf0, x_mask, uv) - pred_lf0 = self.f0_decoder(x, norm_lf0, x_mask, spk_emb=g) - - # encoder - z_ptemp, m_p, logs_p, _ = self.enc_p(x, x_mask, f0=f0_to_coarse(f0)) - z, m_q, logs_q, spec_mask = self.enc_q(spec, spec_lengths, g=g) - - # flow - z_p = self.flow(z, spec_mask, g=g) - z_slice, pitch_slice, ids_slice = commons.rand_slice_segments_with_pitch(z, f0, spec_lengths, self.segment_size) - - # nsf decoder - o = self.dec(z_slice, g=g, f0=pitch_slice) - - return o, ids_slice, spec_mask, (z, z_p, m_p, logs_p, m_q, logs_q), pred_lf0, norm_lf0, lf0 - - def infer(self, c, f0, uv, g=None, noice_scale=0.35, predict_f0=False): - c_lengths = (torch.ones(c.size(0)) * c.size(-1)).to(c.device) - g = self.emb_g(g).transpose(1,2) - x_mask = torch.unsqueeze(commons.sequence_mask(c_lengths, c.size(2)), 1).to(c.dtype) - x = self.pre(c) * x_mask + self.emb_uv(uv.long()).transpose(1,2) - - if predict_f0: - lf0 = 2595. * torch.log10(1. + f0.unsqueeze(1) / 700.) 
/ 500 - norm_lf0 = utils.normalize_f0(lf0, x_mask, uv, random_scale=False) - pred_lf0 = self.f0_decoder(x, norm_lf0, x_mask, spk_emb=g) - f0 = (700 * (torch.pow(10, pred_lf0 * 500 / 2595) - 1)).squeeze(1) - - z_p, m_p, logs_p, c_mask = self.enc_p(x, x_mask, f0=f0_to_coarse(f0), noice_scale=noice_scale) - z = self.flow(z_p, c_mask, g=g, reverse=True) - o = self.dec(z * c_mask, g=g, f0=f0) - return o diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/roberta/commonsense_qa/README.md b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/roberta/commonsense_qa/README.md deleted file mode 100644 index 7f386decd87d93bf701e2e313c7fea39d982224f..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/roberta/commonsense_qa/README.md +++ /dev/null @@ -1,99 +0,0 @@ -# Finetuning RoBERTa on Commonsense QA - -We follow a similar approach to [finetuning RACE](../README.race.md). Specifically -for each question we construct five inputs, one for each of the five candidate -answer choices. Each input is constructed by concatenating the question and -candidate answer. We then encode each input and pass the resulting "[CLS]" -representations through a fully-connected layer to predict the correct answer. -We train with a standard cross-entropy loss. - -We also found it helpful to prepend a prefix of `Q:` to the question and `A:` to -the answer. The complete input format is: -``` - Q: Where would I not want a fox? A: hen house -``` - -Our final submission is based on a hyperparameter search over the learning rate -(1e-5, 2e-5, 3e-5), batch size (8, 16), number of training steps (2000, 3000, -4000) and random seed. We selected the model with the best performance on the -development set after 100 trials. - -### 1) Download data from the Commonsense QA website (https://www.tau-nlp.org/commonsenseqa) -```bash -bash examples/roberta/commonsense_qa/download_cqa_data.sh -``` - -### 2) Finetune - -```bash -MAX_UPDATES=3000 # Number of training steps. -WARMUP_UPDATES=150 # Linearly increase LR over this many steps. -LR=1e-05 # Peak LR for polynomial LR scheduler. -MAX_SENTENCES=16 # Batch size. -SEED=1 # Random seed. -ROBERTA_PATH=/path/to/roberta/model.pt -DATA_DIR=data/CommonsenseQA - -# we use the --user-dir option to load the task from -# the examples/roberta/commonsense_qa directory: -FAIRSEQ_PATH=/path/to/fairseq -FAIRSEQ_USER_DIR=${FAIRSEQ_PATH}/examples/roberta/commonsense_qa - -CUDA_VISIBLE_DEVICES=0 fairseq-train --fp16 --ddp-backend=legacy_ddp \ - $DATA_DIR \ - --user-dir $FAIRSEQ_USER_DIR \ - --restore-file $ROBERTA_PATH \ - --reset-optimizer --reset-dataloader --reset-meters \ - --no-epoch-checkpoints --no-last-checkpoints --no-save-optimizer-state \ - --best-checkpoint-metric accuracy --maximize-best-checkpoint-metric \ - --task commonsense_qa --init-token 0 --bpe gpt2 \ - --arch roberta_large --max-positions 512 \ - --dropout 0.1 --attention-dropout 0.1 --weight-decay 0.01 \ - --criterion sentence_ranking --num-classes 5 \ - --optimizer adam --adam-betas '(0.9, 0.98)' --adam-eps 1e-06 --clip-norm 0.0 \ - --lr-scheduler polynomial_decay --lr $LR \ - --warmup-updates $WARMUP_UPDATES --total-num-update $MAX_UPDATES \ - --batch-size $MAX_SENTENCES \ - --max-update $MAX_UPDATES \ - --log-format simple --log-interval 25 \ - --seed $SEED -``` - -The above command assumes training on 1 GPU with 32GB of RAM. For GPUs with -less memory, decrease `--batch-size` and increase `--update-freq` -accordingly to compensate. 
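As a rough guide, fairseq's `--update-freq` flag accumulates gradients over that many mini-batches before each optimizer step, so the effective batch size is approximately `--batch-size` × `--update-freq` per GPU. For example, on a card that cannot fit a batch of 16, one might keep the same effective batch size by passing `--batch-size 8 --update-freq 2`.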
- -### 3) Evaluate -```python -import json -import torch -from fairseq.models.roberta import RobertaModel -from examples.roberta import commonsense_qa # load the Commonsense QA task -roberta = RobertaModel.from_pretrained('checkpoints', 'checkpoint_best.pt', 'data/CommonsenseQA') -roberta.eval() # disable dropout -roberta.cuda() # use the GPU (optional) -nsamples, ncorrect = 0, 0 -with open('data/CommonsenseQA/valid.jsonl') as h: - for line in h: - example = json.loads(line) - scores = [] - for choice in example['question']['choices']: - input = roberta.encode( - 'Q: ' + example['question']['stem'], - 'A: ' + choice['text'], - no_separator=True - ) - score = roberta.predict('sentence_classification_head', input, return_logits=True) - scores.append(score) - pred = torch.cat(scores).argmax() - answer = ord(example['answerKey']) - ord('A') - nsamples += 1 - if pred == answer: - ncorrect += 1 - -print('Accuracy: ' + str(ncorrect / float(nsamples))) -# Accuracy: 0.7846027846027847 -``` - -The above snippet is not batched, which makes it quite slow. See [instructions -for batched prediction with RoBERTa](https://github.com/pytorch/fairseq/tree/main/examples/roberta#batched-prediction). diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/optim/adam.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/optim/adam.py deleted file mode 100644 index d3ae9e64a74774310adcd9968d2eae23368890f9..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/optim/adam.py +++ /dev/null @@ -1,239 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import logging -import math -from collections.abc import Collection -from dataclasses import dataclass, field -from typing import Any, List - -import torch -import torch.distributed as dist -import torch.optim -from fairseq.dataclass import FairseqDataclass -from fairseq.optim import FairseqOptimizer, register_optimizer -from fairseq.optim.fused_adam import get_fused_adam_class -from omegaconf import II, OmegaConf - - -logger = logging.getLogger(__name__) - - -@dataclass -class FairseqAdamConfig(FairseqDataclass): - adam_betas: Any = field( - default=(0.9, 0.999), metadata={"help": "betas for Adam optimizer"} - ) - adam_eps: float = field( - default=1e-8, metadata={"help": "epsilon for Adam optimizer"} - ) - weight_decay: float = field(default=0.0, metadata={"help": "weight decay"}) - use_old_adam: bool = field( - default=False, metadata={"help": "Use fairseq.optim.adam.Adam"} - ) - fp16_adam_stats: bool = field( - default=False, metadata={"help": "use FP16 stats (with automatic scaling)"} - ) - # TODO common vars below in parent - tpu: bool = II("common.tpu") - lr: List[float] = II("optimization.lr") - - -@register_optimizer("adam", dataclass=FairseqAdamConfig) -class FairseqAdam(FairseqOptimizer): - """Adam optimizer for fairseq. - - Important note: this optimizer corresponds to the "AdamW" variant of - Adam in its weight decay behavior. As such, it is most closely - analogous to torch.optim.AdamW from PyTorch. 
- """ - - def __init__(self, cfg: FairseqAdamConfig, params): - super().__init__(cfg) - fused_adam_cls = get_fused_adam_class() - use_fused_adam = ( - not getattr(cfg, "use_old_adam", False) - and fused_adam_cls is not None - and torch.cuda.is_available() - ) - if getattr(cfg, "tpu", False): - if self.cfg.fp16_adam_stats: - raise NotImplementedError("--fp16-adam-stats is only supported on GPU") - # on TPUs we use the Adam defined here, since it - # automatically casts gradients to FP32 - self._optimizer = Adam(params, **self.optimizer_config) - elif use_fused_adam: - logger.info("using FusedAdam") - self._optimizer = fused_adam_cls( - params, - use_fp16_stats=self.cfg.fp16_adam_stats, - **self.optimizer_config - ) - else: - if self.cfg.fp16_adam_stats: - raise NotImplementedError("--fp16-adam-stats is only supported with FusedAdamV1") - self._optimizer = Adam(params, **self.optimizer_config) - - @property - def optimizer_config(self): - """ - Return a kwarg dictionary that will be used to override optimizer - args stored in checkpoints. This allows us to load a checkpoint and - resume training using a different set of optimizer args, e.g., with a - different learning rate. - """ - return { - "lr": self.cfg.lr[0] - if isinstance(self.cfg.lr, Collection) - else self.cfg.lr, - "betas": eval(self.cfg.adam_betas) - if isinstance(self.cfg.adam_betas, str) - else OmegaConf.to_container(self.cfg.adam_betas), - "eps": self.cfg.adam_eps, - "weight_decay": self.cfg.weight_decay, - } - - def average_params(self): - """Reduce Params is only used during BMUF distributed training.""" - state_dict = self.optimizer.state_dict() - total_gpus = float(dist.get_world_size()) - - for _, value in state_dict["state"].items(): - value["exp_avg"] /= total_gpus - value["exp_avg_sq"] /= total_gpus - dist.all_reduce(value["exp_avg"], op=dist.ReduceOp.SUM) - dist.all_reduce(value["exp_avg_sq"], op=dist.ReduceOp.SUM) - - -class Adam(torch.optim.Optimizer): - r"""Implements Adam algorithm. - - This implementation is modified from torch.optim.Adam based on: - `Fixed Weight Decay Regularization in Adam` - (see https://arxiv.org/abs/1711.05101) - - It has been proposed in `Adam: A Method for Stochastic Optimization`_. - - Args: - params (iterable): iterable of parameters to optimize or dicts defining - parameter groups - lr (float, optional): learning rate (default: 1e-3) - betas (Tuple[float, float], optional): coefficients used for computing - running averages of gradient and its square (default: (0.9, 0.999)) - eps (float, optional): term added to the denominator to improve - numerical stability (default: 1e-8) - weight_decay (float, optional): weight decay (L2 penalty) (default: 0) - amsgrad (boolean, optional): whether to use the AMSGrad variant of this - algorithm from the paper `On the Convergence of Adam and Beyond`_ - - .. _Adam\: A Method for Stochastic Optimization: - https://arxiv.org/abs/1412.6980 - .. _On the Convergence of Adam and Beyond: - https://openreview.net/forum?id=ryQu7f-RZ - """ - - def __init__( - self, - params, - lr=1e-3, - betas=(0.9, 0.999), - eps=1e-8, - weight_decay=0, - amsgrad=False, - ): - defaults = dict( - lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad - ) - super(Adam, self).__init__(params, defaults) - - @property - def supports_memory_efficient_fp16(self): - return True - - @property - def supports_flat_params(self): - return True - - def step(self, closure=None): - """Performs a single optimization step. 
- - Args: - closure (callable, optional): A closure that reevaluates the model - and returns the loss. - """ - loss = None - if closure is not None: - loss = closure() - - for group in self.param_groups: - for p in group["params"]: - if p.grad is None: - continue - grad = p.grad.data - if grad.dtype in {torch.float16, torch.bfloat16}: - grad = grad.float() - if grad.is_sparse: - raise RuntimeError( - "Adam does not support sparse gradients, please consider SparseAdam instead" - ) - amsgrad = group.get("amsgrad", False) - - p_data_fp32 = p.data - if p.data.dtype in {torch.float16, torch.bfloat16}: - p_data_fp32 = p_data_fp32.float() - - state = self.state[p] - - # State initialization - if len(state) == 0: - state["step"] = 0 - # Exponential moving average of gradient values - state["exp_avg"] = torch.zeros_like(p_data_fp32) - # Exponential moving average of squared gradient values - state["exp_avg_sq"] = torch.zeros_like(p_data_fp32) - if amsgrad: - # Maintains max of all exp. moving avg. of sq. grad. values - state["max_exp_avg_sq"] = torch.zeros_like(p_data_fp32) - else: - state["exp_avg"] = state["exp_avg"].to(p_data_fp32) - state["exp_avg_sq"] = state["exp_avg_sq"].to(p_data_fp32) - if amsgrad: - state["max_exp_avg_sq"] = state["max_exp_avg_sq"].to( - p_data_fp32 - ) - - exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"] - if amsgrad: - max_exp_avg_sq = state["max_exp_avg_sq"] - beta1, beta2 = group["betas"] - - state["step"] += 1 - - # Decay the first and second moment running average coefficient - exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) - exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) - if amsgrad: - # Maintains the maximum of all 2nd moment running avg. till now - torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq) - # Use the max. for normalizing running avg. of gradient - denom = max_exp_avg_sq.sqrt().add_(group["eps"]) - else: - denom = exp_avg_sq.sqrt().add_(group["eps"]) - - bias_correction1 = 1 - beta1 ** state["step"] - bias_correction2 = 1 - beta2 ** state["step"] - step_size = group["lr"] * math.sqrt(bias_correction2) / bias_correction1 - - if group["weight_decay"] != 0: - p_data_fp32.add_( - p_data_fp32, alpha=-group["weight_decay"] * group["lr"] - ) - - p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size) - - if p.data.dtype in {torch.float16, torch.bfloat16}: - p.data.copy_(p_data_fp32) - - return loss diff --git a/spaces/stamps-labs/stamp2vec/embedding_models/vits8/oml/create_dataset.py b/spaces/stamps-labs/stamp2vec/embedding_models/vits8/oml/create_dataset.py deleted file mode 100644 index 2dc410156205c0d6ffec417ab5e8415e3a5ee28c..0000000000000000000000000000000000000000 --- a/spaces/stamps-labs/stamp2vec/embedding_models/vits8/oml/create_dataset.py +++ /dev/null @@ -1,71 +0,0 @@ -import os -from PIL import Image -import pandas as pd - -import argparse - -parser = argparse.ArgumentParser("Create a dataset for training with OML", - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - -parser.add_argument("--root-data-path", help="Path to images for dataset", default="data/train_val/") -parser.add_argument("--image-data-path", help="Image folder in root data path", default="images/") -parser.add_argument("--train-val-split", - help="In which ratio to split data in format train:val (For example 80:20)", default="80:20") -parser.add_argument("--separator", - help="What separator is used in image name to separate class name and instance (E.g. 
circle1_5, separator=_)", - default="_") - -args = parser.parse_args() -config = vars(args) - -root_path = config["root_data_path"] -image_path = config["image_data_path"] -separator = config["separator"] - -train_prc, val_prc = tuple(int(num)/100 for num in config["train_val_split"].split(":")) - -class_names = set() -for image in os.listdir(root_path+image_path): - if image.endswith(("png", "jpg", "bmp", "webp")): - img_name = image.split(".")[0] - Image.open(root_path+image_path+image).resize((224,224)).save(root_path+image_path+img_name+".png", "PNG") - if not image.endswith("png"): - os.remove(root_path+image_path+image) - img_name = img_name.split(separator) - class_name = img_name[0]+img_name[1] - class_names.add(class_name) - else: - print("Not all of the images are in supported format") - - -#For each class in set assign its index in a set as a class label. -class_label_dict = {} -for ind, name in enumerate(class_names): - class_label_dict[name] = ind - -class_count = len(class_names) -train_class_count = int(class_count*train_prc) -print(train_class_count) - -df_dict = {"label": [], - "path": [], - "split": [], - "is_query": [], - "is_gallery": []} -for image in os.listdir(root_path+image_path): - if image.endswith((".png", ".jpg", ".bmp", ".webp")): - img_name = image.split(".")[0].split(separator) - class_name = img_name[0]+img_name[1] - label = class_label_dict[class_name] - path = image_path+image - split = "train" if label <= train_class_count else "validation" - is_query, is_gallery = (1, 1) if split=="validation" else (None, None) - df_dict["label"].append(label) - df_dict["path"].append(path) - df_dict["split"].append(split) - df_dict["is_query"].append(is_query) - df_dict["is_gallery"].append(is_gallery) - -df = pd.DataFrame(df_dict) - -df.to_csv(root_path+"df_stamps.csv", index=False) \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/AYMO.SET Korg Pa3x.rar 18.md b/spaces/stomexserde/gpt4-ui/Examples/AYMO.SET Korg Pa3x.rar 18.md deleted file mode 100644 index dea821eacff654e452fa1f9766669037cdcf6269..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/AYMO.SET Korg Pa3x.rar 18.md +++ /dev/null @@ -1,111 +0,0 @@ - -

          Introduction

          -

          If you are a musician or a producer who owns a Korg Pa3x keyboard, you might be interested in expanding your sonic palette with new sounds and styles. One of the ways to do that is to download or buy sets that are compatible with your keyboard. These sets are collections of sounds and styles that can enhance your musical performance and creativity.

          -

          AYMO.SET Korg Pa3x.rar 18


          DOWNLOAD https://urlgoal.com/2uI6dm



          -

          One of the sets that you might have come across is AYMO.SET Korg Pa3x.rar 18. This is a file name that refers to a set of sounds and styles for the Korg Pa3x keyboard, which is a professional arranger that can play various musical genres and instruments. The file name also indicates that the set is compressed in a RAR format, and that it is the 18th version of the set.

          -

          But what exactly is AYMO.SET Korg Pa3x.rar 18? What does it offer to you as a user of Korg Pa3x? And how can you install and use it on your keyboard? In this article, we will answer these questions and more. We will review the features, pros and cons, and comparison with other sets for Korg Pa3x. We will also provide you with some FAQs and a link to download or purchase the set.

          -

          Features of AYMO.SET Korg Pa3x.rar 18

          How many sounds and styles are included in the set?

          -

          AYMO.SET Korg Pa3x.rar 18 is a large and diverse set that contains over 2000 sounds and over 1000 styles for your Korg Pa3x keyboard. These sounds and styles cover a wide range of musical genres and instruments, such as pop, rock, jazz, blues, country, folk, classical, ethnic, world, oriental, Arabic, Turkish, Persian, Balkan, and more. You can find pianos, organs, guitars, basses, drums, percussion, strings, brass, woodwinds, synths, pads, leads, effects, vocals, and more. You can also find sounds and styles that are specific to certain artists or songs, such as Beatles, Abba, Pink Floyd, Michael Jackson, Adele, Ed Sheeran, and more.

          -

          What are some of the genres and instruments covered by the set?

          -

          To give you an idea of the variety and quality of the sounds and styles in AYMO.SET Korg Pa3x.rar 18, here are some examples of the genres and instruments that you can find in the set:

          -

| Genre | Instruments |
| --- | --- |
| Pop | Piano Pop Ballad Style, Electric Piano Sound, Acoustic Guitar Sound, Pop Strings Sound, Pop Drums Sound |
| Rock | Rock Shuffle Style, Distortion Guitar Sound, Bass Guitar Sound, Hammond Organ Sound, Rock Drums Sound |
| Jazz | Jazz Swing Style, Grand Piano Sound, Upright Bass Sound, Saxophone Sound, Jazz Drums Sound |
| Blues | Blues Shuffle Style, Honky Tonk Piano Sound, Harmonica Sound, Guitar Slide Sound, Blues Drums Sound |
| Country | Country Waltz Style, Banjo Sound, Fiddle Sound, Pedal Steel Guitar Sound, Country Drums Sound |
| Folk | Folk Ballad Style, Nylon Guitar Sound, Mandolin Sound, Accordion Sound, Folk Drums Sound |
| Classical | Classical Waltz Style, Harp Sound, Cello Sound, Oboe Sound, Timpani Sound |
| Ethnic | Ethnic Dance Style, Sitar Sound, Bouzouki Sound, Duduk Sound, Darbuka Sound |
| Oriental | Oriental Pop Style, Kanun Sound, Oud Sound, Ney Sound, Riqq Sound |
| Turkish | Turkish Folk Style, Saz Sound, Cumbus Sound, Zurna Sound, Davul Sound |
| Persian | Persian Pop Style, Santur Sound, Tar Sound, Kamancheh Sound, Zarb Sound |
| Balkan | Balkan Dance Style, Tambura Sound, Gaida Sound, Kaval Sound, Tapan Sound |

          How to install and use the set on your Korg Pa3x keyboard?

          To install and use AYMO.SET Korg Pa3x.rar 18 on your Korg Pa3x keyboard, you need to follow these steps:

          1. Download or buy AYMO.SET Korg Pa3x.rar 18 from a reliable source. You can find a link to download or purchase the set at the end of this article.
          2. Unzip or extract the RAR file using software such as WinRAR or 7-Zip. You will get a folder named AYMO.SET Korg Pa3x.
          3. Copy the folder to a USB flash drive or an SD card that is compatible with your Korg Pa3x keyboard.
          4. Insert the USB flash drive or the SD card into your Korg Pa3x keyboard.
          5. Select MEDIA mode on your keyboard.
          6. Select LOAD on the display screen.
          7. Select the USB or the SD card as the source device.
          8. Select the AYMO.SET Korg Pa3x folder and press OPEN.
          9. Select the AYMO.SET Korg Pa3x.SET file and press LOAD.
          10. Wait for the loading process to complete. You will see a message that says "Loading Completed".
          11. Press EXIT to return to the main screen.
          -

          Now you can use the sounds and styles from AYMO.SET Korg Pa3x.rar 18 on your keyboard. To access the sounds, select SOUND mode on your keyboard. To access the styles, select STYLE mode on your keyboard. You can browse and select the sounds and styles using the dial, the buttons, or the touch screen. You can also customize and save the sounds and styles to your liking using the EDIT and WRITE functions on your keyboard.

          -

          Pros and Cons of AYMO.SET Korg Pa3x.rar 18

          -

          AYMO.SET Korg Pa3x.rar 18 is a great set for anyone who wants to expand their musical possibilities with their Korg Pa3x keyboard. However, like any other product, it has its pros and cons. Here are some of the advantages and disadvantages of using AYMO.SET Korg Pa3x.rar 18:

          -

          Pros

          -
            -
          • It offers a large and diverse collection of sounds and styles that cover various musical genres and instruments.
          • It provides high-quality and realistic sounds and styles that can enhance your musical performance and creativity.
          • It is easy to install and use on your Korg Pa3x keyboard.
          • It is compatible with other sets and resources for Korg Pa3x.
          • It is affordable and worth the price.
          -

          Cons

          -
            -
          • It requires a lot of memory space on your USB or SD card, as well as on your keyboard.
          • It might not suit everyone's taste or preference in terms of sounds and styles.
          • It might not include some of the sounds or styles that you are looking for or need.
          • It might have some bugs or errors that need to be fixed or updated.
          • It might not be available or accessible in some regions or countries.
          -

          Comparison with Other Sets for Korg Pa3x

          -

          AYMO.SET Korg Pa3x.rar 18 is not the only set that you can use for your Korg Pa3x keyboard. There are many other sets that are compatible with your keyboard, and that offer different sounds and styles. Some of these sets are:

          -

          KORG PA Manager

          -

          KORG PA Manager is a software that allows you to manage and organize your sets for Korg Pa keyboards. You can use it to create, edit, copy, delete, rename, merge, split, convert, import, export, backup, restore, and more. You can also use it to preview and play the sounds and styles from your sets. You can download or buy KORG PA Manager from https://www.korgpamanager.com/.

          -

          Oriental SET for Korg Pa 800 - Pa2x - Pa3x

          -

          Oriental SET for Korg Pa 800 - Pa2x - Pa3x is a set that focuses on oriental sounds and styles for Korg Pa keyboards. It includes over 1500 sounds and over 500 styles that cover Arabic, Turkish, Persian, Kurdish, Afghan, Indian, Pakistani, and more. It also includes drum kits, pads, loops, effects, vocals, and more. You can download or buy Oriental SET for Korg Pa 800 - Pa2x - Pa3x from https://www.korgworld.co.uk/product/oriental-set-for-korg-pa-800-pa2x-pa3x/.

          -

          Balkan SET for Korg Pa600 - Pa900 - Pa3x

          -

          Balkan SET for Korg Pa600 - Pa900 - Pa3x is a set that focuses on Balkan sounds and styles for Korg Pa keyboards. It includes over 1000 sounds and over 300 styles that cover Serbian, Croatian, Bosnian, Macedonian, Bulgarian, Romanian, Greek, Albanian, Turkish, Gypsy, and more. It also includes drum kits, pads, loops, effects, vocals , and more. You can download or buy Balkan SET for Korg Pa600 - Pa900 - Pa3x from https://www.korgworld.co.uk/product/balkan-set-for-korg-pa600-pa900-pa3x/.

          -

          How does AYMO.SET Korg Pa3x.rar 18 compare with other sets for Korg Pa3x?

          -

          AYMO.SET Korg Pa3x.rar 18 is a set that stands out from other sets for Korg Pa3x in terms of quality, quantity, diversity, and compatibility. It offers more sounds and styles than most of the other sets, and covers more genres and instruments than most of the other sets. It also provides realistic and high-quality sounds and styles that can suit any musical taste or preference. It is compatible with other sets and resources for Korg Pa3x, and can be easily installed and used on your keyboard.

          -

          However, AYMO.SET Korg Pa3x.rar 18 is not the only set that you can use for your Korg Pa3x keyboard. Depending on your needs and preferences, you might find other sets that are more suitable or appealing to you. For example, if you are looking for a set that focuses on a specific genre or region, such as oriental or Balkan, you might prefer the sets that are dedicated to those genres or regions. If you are looking for a set that allows you to manage and organize your sets more easily, you might prefer the software that offers that functionality.

          -

          Therefore, the best way to compare AYMO.SET Korg Pa3x.rar 18 with other sets for Korg Pa3x is to try them out yourself. You can download or buy the sets from the links provided above, or from other sources that you trust. You can also watch or listen to some demos or reviews of the sets on YouTube or other platforms. You can then decide which set is the best for you and your keyboard.

          -

          Conclusion

          -

          In this article, we have reviewed AYMO.SET Korg Pa3x.rar 18, a set of sounds and styles for the Korg Pa3x keyboard. We have discussed what it is, what it offers, how to install and use it, what are its pros and cons, and how it compares with other sets for Korg Pa3x.

          -

          We have concluded that AYMO.SET Korg Pa3x.rar 18 is a great set for anyone who wants to expand their musical possibilities with their Korg Pa3x keyboard. It offers a large and diverse collection of sounds and styles that cover various musical genres and instruments. It provides high-quality and realistic sounds and styles that can enhance your musical performance and creativity. It is easy to install and use on your keyboard. It is compatible with other sets and resources for Korg Pa3x. It is affordable and worth the price.

          -

          That said, it is not the only option: as the comparison above shows, a set dedicated to a specific genre or region, or a tool focused on managing your sets, may suit some users better.

          -

          Therefore, we recommend that you try out AYMO.SET Korg Pa3x.rar 18 yourself, as well as other sets for Korg Pa3x, and decide which one is the best for you and your keyboard.

          -

          If you are interested in downloading or buying AYMO.SET Korg Pa3x.rar 18, you can use this link: https://www.korgworld.co.uk/product/aymo-set-korg-pa3x-rar-18/.

          -

          FAQs

          -

          Where can I download or buy AYMO.SET Korg Pa3x.rar 18?

          -

          You can download or buy AYMO.SET Korg Pa3x.rar 18 from this link: https://www.korgworld.co.uk/product/aymo-set-korg-pa3x-rar-18/. This is a reliable source that offers secure payment and fast delivery. You can also find other sources online that offer the set, but make sure they are trustworthy and legitimate before downloading or buying anything from them.

          -

          How can I unzip or extract the RAR file?

          -

          You can unzip or extract the RAR file using software such as WinRAR or 7-Zip. You can download these programs from their official websites: https://www.win-rar.com/ or https://www.7-zip.org/. After downloading and installing one of them, right-click on the RAR file and select "Extract Here" or "Extract to AYMO.SET Korg Pa3x". You will get a folder named AYMO.SET Korg Pa3x that contains the set file.
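          If you prefer to script the extraction instead of using the WinRAR or 7-Zip interface, a minimal Python sketch is shown below. It assumes the third-party patool package (`pip install patool`) plus an unrar or 7z backend installed on the system, and the archive name is only a placeholder, not the exact file you will receive.

```python
import patoolib

# Placeholder archive name -- substitute the RAR file you actually downloaded.
patoolib.extract_archive("AYMO_SET_Korg_Pa3x.rar", outdir="AYMO.SET Korg Pa3x")
```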

          -

          How can I update or customize the set?

          -

          You can update or customize the set using the EDIT and WRITE functions on your Korg Pa3x keyboard. You can access these functions by pressing the EDIT or WRITE buttons on your keyboard, or by selecting them from the display screen. You can then edit or write the sounds and styles according to your preferences. You can change the parameters, effects, volumes, tempos, scales, chords, and more. You can also save your changes to the set file or to a new file.

          -

          How can I contact the creator or developer of the set?

          -

          You can contact the creator or developer of the set by sending an email to aymo.set@gmail.com. You can also visit their website at https://www.aymoset.com/. You can ask them any questions, suggestions, feedback, or requests regarding the set. You can also follow them on social media platforms such as Facebook, Twitter, Instagram, and YouTube.

          -

          How can I get more sets or resources for my Korg Pa3x keyboard?

          -

          You can get more sets or resources for your Korg Pa3x keyboard by visiting the official website of Korg at https://www.korg.com/. You can find manuals, drivers, updates, software, tutorials, videos, and more for your keyboard. You can also join the online community of Korg users and enthusiasts at https://www.korgforums.com/. You can share your experiences, tips, tricks, questions, answers, and more with other Korg users. You can also download or buy more sets from other sources online, but make sure they are trustworthy and legitimate before downloading or buying anything from them.

          -

          I hope you enjoyed reading this article and learned something new about AYMO.SET Korg Pa3x.rar 18. If you have any comments or questions, please feel free to leave them below. Thank you for your time and attention.

          -
          -
          \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Bandicam 2.4.2.905 (2015) RePack Portable !EXCLUSIVE!.md b/spaces/stomexserde/gpt4-ui/Examples/Bandicam 2.4.2.905 (2015) RePack Portable !EXCLUSIVE!.md deleted file mode 100644 index d4f76d6ad5a37df755e35089525d2a18dae6d6b6..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Bandicam 2.4.2.905 (2015) RePack Portable !EXCLUSIVE!.md +++ /dev/null @@ -1,107 +0,0 @@ -
          -

          Bandicam 2.4.2.905 (2015) RePack Portable: A Review

          -

          Do you want to record your screen activity for different purposes? Do you want to create high-quality videos with a small file size? Do you want to use a simple and easy-to-use software that has many options for video recording? If you answered yes to any of these questions, then you might want to check out Bandicam 2.4.2.905 (2015) RePack Portable.

          -

          Bandicam is a special video recording software for desktop activity that can compress the video quality during the recording and upload your recorded videos on YouTube easily. You can also record videos up to 2560x1600 resolutions with less CPU usage with this software.

          -

          Bandicam 2.4.2.905 (2015) RePack Portable


          Download Zip ✵✵✵ https://urlgoal.com/2uI9ex



          -

          In this article, we will review Bandicam 2.4.2.905 (2015) RePack Portable, which is a modified version of Bandicam that does not require installation and can be run from any portable device such as a USB flash drive or an external hard drive.

          -

          We will cover the features, pros and cons, download and installation process, usage tutorial, tips and tricks, and FAQs about this version of Bandicam.

          -

          By the end of this article, you will have a clear idea of whether Bandicam 2.4.2.905 (2015) RePack Portable is suitable for your needs or not.

          -

          Features of Bandicam 2.4.2.905 (2015) RePack Portable

          -

          Bandicam 2.4.2.905 (2015) RePack Portable has some of the following features that make it stand out from other versions of Bandicam:

          -
            -
          • Portable: You can run this version of Bandicam from any portable device without installing it on your PC. This means you can use it on any PC without leaving any traces or affecting the system performance.
          • RePack: This version of Bandicam has been repacked by a third-party developer to remove some unnecessary components and optimize the software for better performance and compatibility. This means you can enjoy a faster and smoother video recording experience with this version of Bandicam.
          • High-quality video recording: You can record videos up to 2560x1600 resolutions with this version of Bandicam. You can also choose from different video formats such as AVI, MP4, or MPEG-1. You can also adjust the video quality, frame rate, and bitrate according to your preferences.
          • Small file size: You can compress the video quality during the recording with this version of Bandicam. This means you can save disk space and upload your videos faster on YouTube or other platforms. You can also use the H.264 codec, which is known for its high compression ratio and quality.
          • Easy-to-use interface: You can easily access all the features and options of this version of Bandicam with a simple and intuitive interface. You can also customize the hotkeys, settings, and preferences according to your needs.
          • Multiple recording modes: You can choose from different recording modes with this version of Bandicam. You can record the full screen, a specific area, a game, a webcam, or a device such as a smartphone or a console. You can also record audio from your microphone or speakers.
          • Real-time drawing function: You can draw lines, boxes, or highlights on your screen while recording with this version of Bandicam. This can help you emphasize or explain something in your video.
          • Mouse effects function: You can add mouse effects such as cursor highlighting, click effects, or mouse sounds to your video with this version of Bandicam. This can help you make your video more interactive and engaging.
          • Logo function: You can add your own logo or watermark to your video with this version of Bandicam. This can help you protect your video from unauthorized use or promote your brand.
          • Scheduled recording function: You can set a specific time and duration for your video recording with this version of Bandicam. This can help you record something automatically without being present.
          -

          Pros and Cons of Bandicam 2.4.2.905 (2015) RePack Portable

          -

          As with any software, Bandicam 2.4.2.905 (2015) RePack Portable has its own pros and cons that you should consider before using it. Here are some of them:

| Pros | Cons |
| --- | --- |
| No installation required | Possible compatibility issues with some PCs or games |
| Optimized performance and compatibility | Possible bugs or errors due to repacking process |
| High-quality video recording with small file size | Limited editing options within the software |
| Easy-to-use interface with multiple recording modes and functions | Limited customer support from the official developer |
| Free to use without any limitations or watermarks | Possible legal issues due to unauthorized modification of the software |
          -

          As you can see, Bandicam 2.4.2.905 (2015) RePack Portable has many benefits but also some drawbacks that you should be aware of before using it.

          -

          -

          How to Download and Install Bandicam 2.4.2.905 (2015) RePack Portable

          -

          If you want to try out Bandicam 2.4.2.905 (2015) RePack Portable, you will need to download it from a reliable source and extract it on your portable device. Here are the steps to do so:

          -
            -
          1. Go to this link, which is one of the sources where you can download Bandicam 2.4.2.905 (2015) RePack Portable safely and securely. Click on the green "Download" button and wait for the download to start.
          2. Once the download is complete, you will get a ZIP file named "Bandicam 2.4.2.905 (2015) RePack Portable.zip". Extract this file on your portable device using software such as WinRAR or 7-Zip.
          3. After extracting the file, you will get a folder named "Bandicam 2.4.2.905 (2015) RePack Portable". Inside this folder you will find the executable file named "bdcam.exe", which is the file you need to run to launch Bandicam 2.4.2.905 (2015) RePack Portable.
          4. Double-click on the "bdcam.exe" file and you will see the Bandicam interface. You can now start using Bandicam 2.4.2.905 (2015) RePack Portable to record your screen activity.
          -

          That's it! You have successfully downloaded and installed Bandicam 2.4.2.905 (2015) RePack Portable on your portable device.

          -

          How to Use Bandicam 2.4.2.905 (2015) RePack Portable

          -

          Now that you have Bandicam 2.4.2.905 (2015) RePack Portable on your portable device, you might be wondering how to use it to record your screen activity. Here are the steps to do so:

          -
            -
          1. Launch Bandicam 2.4.2.905 (2015) RePack Portable by double-clicking on the "bdcam.exe" file.
          2. Select the recording mode that you want to use from the top menu bar. You can choose from "Screen Recording Mode", "Game Recording Mode", "Device Recording Mode", or "Webcam Overlay Mode".
          3. Adjust the settings and options for your recording mode according to your preferences. You can change the video format, quality, frame rate, bitrate, audio settings, hotkeys, logo, mouse effects, real-time drawing, and more.
          4. Select the area that you want to record by dragging the green rectangle on your screen or by entering the coordinates manually.
          5. Click on the red "REC" button or press the hotkey (F12 by default) to start recording your screen activity.
          6. Click on the blue "Stop" button or press the hotkey again to stop recording your screen activity.
          7. You can find your recorded video in the output folder that you specified in the settings or by clicking on the "Open" button next to the "Stop" button.
          8. You can preview, edit, or upload your recorded video using the built-in tools in Bandicam 2.4.2.905 (2015) RePack Portable or with other software of your choice.
          -

          That's it! You have successfully used Bandicam 2.4.2.905 (2015) RePack Portable to record your screen activity.

          -

          Tips and Tricks for Bandicam 2.4.2.905 (2015) RePack Portable

          -

          To make the most out of Bandicam 2.4.2.905 (2015) RePack Portable, here are some tips and tricks that you can use:

          -
            -
          • Use a high-performance PC: Although Bandicam 2.4.2.905 (2015) RePack Portable does not require installation and has optimized performance and compatibility, it still needs a decent PC to run smoothly and record high-quality videos without lagging or crashing.
          • Use a large-capacity portable device: Since Bandicam 2.4.2.905 (2015) RePack Portable runs from your portable device, you will need enough space to store your recorded videos and other files related to Bandicam 2.4 .2.905 (2015) RePack Portable. You should use a portable device with a large capacity and fast transfer speed to avoid running out of space or slowing down your recording process.
          • -
          • Use the H.264 codec: The H.264 codec is one of the best codecs for video compression and quality. It can reduce the file size of your recorded videos without compromising the quality. You can select the H.264 codec in the video settings of Bandicam 2.4.2.905 (2015) RePack Portable.
          • -
          • Use the Game Recording Mode: If you want to record your gameplay, you should use the Game Recording Mode in Bandicam 2.4.2.905 (2015) RePack Portable. This mode can detect and record the game screen automatically and capture the game sound and your voice at the same time. You can also use the FPS overlay and control functions to monitor and adjust your game performance.
          • -
          • Use the Device Recording Mode: If you want to record your device screen such as a smartphone or a console, you should use the Device Recording Mode in Bandicam 2.4.2.905 (2015) RePack Portable. This mode can record your device screen via HDMI or USB connection and capture the device sound and your voice at the same time. You can also use the real-time drawing function to annotate or highlight something on your device screen.
          • -
          • Use the Webcam Overlay Mode: If you want to record your webcam along with your screen activity, you should use the Webcam Overlay Mode in Bandicam 2.4.2.905 (2015) RePack Portable. This mode can record your webcam video as a separate file or as an overlay on your screen video. You can also adjust the position, size, and opacity of your webcam overlay.
          • -
          • Use the Scheduled Recording Function: If you want to record something at a specific time and duration, you should use the Scheduled Recording Function in Bandicam 2.4.2.905 (2015) RePack Portable. This function can start and stop your recording automatically according to your settings. You can also set a daily or weekly schedule for your recording.
          • -
          -

          Conclusion

          -

          Bandicam 2.4.2.905 (2015) RePack Portable is a special version of Bandicam that does not require installation and can be run from any portable device such as a USB flash drive or an external hard drive.

          -

          It has many features, pros, and cons that make it different from other versions of Bandicam.

          -

          It is a great software for recording your screen activity for various purposes such as gaming, tutorial, presentation, or entertainment.

          -

          However, it also has some drawbacks such as possible compatibility issues, bugs, errors, limited editing options, limited customer support, and legal issues that you should be aware of before using it.

          -

          Therefore, we recommend that you download and use Bandicam 2.4.2.905 (2015) RePack Portable at your own risk and discretion.

          -

          We hope that this article has helped you understand what Bandicam 2.4 .2.905 (2015) RePack Portable is and how to use it to record your screen activity.

          -

          FAQs

          -

          Here are some of the frequently asked questions and answers about Bandicam 2.4.2.905 (2015) RePack Portable:

          -

          Q: Is Bandicam 2.4.2.905 (2015) RePack Portable safe to use?

          -

          A: Bandicam 2.4.2.905 (2015) RePack Portable is a modified version of Bandicam that has been repacked by a third-party developer to remove some unnecessary components and optimize the software for better performance and compatibility. However, this also means that it may have some bugs, errors, or compatibility issues that are not present in the official version of Bandicam. Moreover, it may also violate the terms and conditions of the original developer and cause some legal issues. Therefore, we advise you to use Bandicam 2.4.2.905 (2015) RePack Portable at your own risk and discretion.

          -

          Q: How can I update Bandicam 2.4.2.905 (2015) RePack Portable?

          -

          A: Bandicam 2.4.2.905 (2015) RePack Portable is not an official version of Bandicam and does not receive any updates or support from the original developer. Therefore, you cannot update Bandicam 2.4.2.905 (2015) RePack Portable to the latest version of Bandicam or fix any bugs or errors that may occur in it. If you want to use the latest version of Bandicam with all the features and functions, you will need to download and install the official version of Bandicam from the official website.

          -

          Q: How can I uninstall Bandicam 2.4.2.905 (2015) RePack Portable?

          -

          A: Bandicam 2.4.2.905 (2015) RePack Portable does not require installation and can be run from any portable device such as a USB flash drive or an external hard drive. Therefore, you do not need to uninstall Bandicam 2.4 .2.905 (2015) RePack Portable from your PC. You just need to delete the folder named "Bandicam 2.4.2.905 (2015) RePack Portable" from your portable device and empty the recycle bin. That's it! You have successfully uninstalled Bandicam 2.4.2.905 (2015) RePack Portable from your PC.

          -

          Q: How can I contact the developer of Bandicam 2.4.2.905 (2015) RePack Portable?

          -

          A: Bandicam 2.4.2.905 (2015) RePack Portable is not an official version of Bandicam and does not have any contact information or customer support from the original developer. Therefore, you cannot contact the developer of Bandicam 2.4.2.905 (2015) RePack Portable for any questions, feedback, or issues that you may have with the software. If you want to contact the original developer of Bandicam, you can visit their official website and use their contact form or email address.

          -

          Q: How can I record longer videos with Bandicam 2.4.2.905 (2015) RePack Portable?

          -

          A: Bandicam 2.4.2.905 (2015) RePack Portable does not have any limitations on the recording time or file size of your videos. You can record as long as you want or as large as you want with this version of Bandicam. However, you should keep in mind that the longer or larger your video is, the more disk space and memory it will consume on your portable device and PC. Therefore, you should make sure that you have enough space and resources to store and play your recorded videos.

          -

          -
          -
          \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Chery Spms V1 1 1 40.md b/spaces/stomexserde/gpt4-ui/Examples/Chery Spms V1 1 1 40.md deleted file mode 100644 index 0fe62b93ad4b388cd1fc4b6a788f9b7c6756767c..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Chery Spms V1 1 1 40.md +++ /dev/null @@ -1,13 +0,0 @@ -
          -

          Chery Spms V1 1 1 40: A Software for Chery Automobiles

          -

          Chery Spms V1 1 1 40 is a software developed by Chery, a Chinese automobile manufacturer, to provide diagnostic and tuning services for its vehicles. The software is used by professionals and enthusiasts who want to access and modify various parameters of the engine, transmission, suspension, brakes, airbags, and other systems of Chery cars.

          -

          The software can be downloaded from the official website of Chery or from third-party sources such as Kit[^1^] or Software Informer[^2^]. The software requires a compatible interface device that connects to the OBD-II port of the vehicle and communicates with the software via USB or Bluetooth. The software supports various models of Chery vehicles, such as Tiggo, QQ, Arrizo, Fulwin, and others.
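          For orientation only, the sketch below shows the kind of request/response exchange that happens over the OBD-II port, written against the open-source python-OBD library and a generic ELM327-style adapter; it is not the SPMS software itself, and the commands a given Chery model answers will vary.

```python
import obd

connection = obd.OBD()  # auto-detects a USB or Bluetooth ELM327-style adapter

if connection.is_connected():
    rpm = connection.query(obd.commands.RPM)       # live data: engine speed
    dtcs = connection.query(obd.commands.GET_DTC)  # stored diagnostic trouble codes
    print("Engine RPM:", rpm.value)
    print("Fault codes:", dtcs.value)
    # connection.query(obd.commands.CLEAR_DTC)     # clears stored codes; use with care
else:
    print("No OBD-II adapter found")
```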

          -

          Chery Spms V1 1 1 40


          Download https://urlgoal.com/2uI9xy



          -

          Chery Spms V1 1 1 40 allows users to perform various functions, such as reading and clearing fault codes, viewing live data and graphs, performing tests and adaptations, coding and programming modules, resetting service intervals, and more. The software also has a user-friendly interface that displays the information in an organized and clear manner. The software can also be customized according to the user's preferences and needs.

          -

          Chery Spms V1 1 1 40 is a useful tool for anyone who owns or works with Chery vehicles. It can help diagnose problems, optimize performance, and personalize features of the car. However, users should be careful when using the software, as improper use may cause damage to the vehicle or void the warranty. Users should also make sure they have the latest version of the software and follow the instructions carefully.

          - -

          Chery Spms V1 1 1 40 has received positive reviews from users who have tried it. They have praised the software for its ease of use, functionality, and compatibility. They have also reported that the software has helped them solve various issues with their vehicles, such as poor fuel economy, rough idle, engine misfire, and ABS warning light. Some users have also shared their experiences of using the software to customize their vehicles, such as changing the speedometer display, adjusting the steering wheel angle, and activating the daytime running lights.

          -

          However, some users have also encountered some problems with the software. Some of the common complaints include difficulty in installing the software, errors in connecting to the vehicle, lack of support for some models or features, and risk of damaging the vehicle if not used properly. Some users have also warned others about downloading the software from unreliable sources, as they may contain viruses or malware that can harm the computer or the vehicle. Therefore, users should be careful when choosing where to download the software and how to use it.

          -

          Chery Spms V1 1 1 40 is a software that can benefit Chery vehicle owners and enthusiasts who want to access and modify their cars. The software can provide valuable information and functions that can help diagnose, optimize, and personalize the vehicle. However, users should also be aware of the potential risks and limitations of the software and use it with caution and responsibility.

          -
          -
          \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Free Photoshop Liquify Tool Download.md b/spaces/stomexserde/gpt4-ui/Examples/Free Photoshop Liquify Tool Download.md deleted file mode 100644 index 557a6e379da0547d283f5289a898f43603fb3c18..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Free Photoshop Liquify Tool Download.md +++ /dev/null @@ -1,40 +0,0 @@ -
          -

          Free Photoshop Liquify Tool Download: How to Use It to Enhance Your Photos

          -

          If you want to create stunning effects on your photos, such as distorting, warping, or reshaping them, you need to try the Liquify tool in Photoshop. This tool lets you push, pull, and swirl pixels as if they were liquid, giving you amazing control over the appearance of your images. You can use it to adjust faces, touch up clothes and hair, and make artistic transformations.

          -

          Free Photoshop Liquify Tool Download


          DOWNLOAD 🆓 https://urlgoal.com/2uI8NN



          -

          In this article, we will show you how to download the Liquify tool for free, and how to use it effectively to enhance your photos. You will also learn about some of the advanced features of the Liquify tool, such as Face-Aware Liquify and Smart Filters.

          -

          How to Download the Liquify Tool for Free

          -

          The Liquify tool is part of Photoshop, which is a powerful photo editing software from Adobe. You can download Photoshop for free as a trial version for 7 days from Adobe's website[^1^]. After that, you will need to pay a monthly subscription fee to continue using it.

          -

          To download Photoshop for free, follow these steps:

          -
            -
1. Go to https://www.adobe.com/products/photoshop/free-trial-download.html and click on "Download free trial".
2. Sign in with your Adobe ID or create one if you don't have one.
3. Follow the instructions on the screen to install Photoshop on your computer.
4. Launch Photoshop and start using the Liquify tool.
          -

          How to Use the Liquify Tool in Photoshop

          -

          To use the Liquify tool in Photoshop, follow these steps:

          -
            -
1. Open the photo you want to edit in Photoshop.
2. Go to Filter > Liquify or press Shift+Ctrl+X (Windows) or Shift+Command+X (Mac) to open the Liquify dialog box.
3. Select a tool from the toolbar on the left side of the dialog box. You can choose from different tools that let you warp, bloat, pucker, twirl, or push pixels in different directions.
4. Adjust the settings of the tool from the options bar on the right side of the dialog box. You can change the brush size, density, pressure, rate, and more.
5. Click and drag on the photo to apply the effect of the tool. You can use the Zoom and Hand tools to magnify or navigate in the photo.
6. If you make a mistake, you can use the Reconstruct tool to undo or smooth out your changes.
7. When you are happy with your result, click OK to apply the Liquify filter to your photo.
          -

          How to Use Advanced Features of the Liquify Tool

          -

          The Liquify tool has some advanced features that can help you achieve more realistic and precise results. Here are some of them:

          -

          -

          Face-Aware Liquify

          -

          If you are working on a portrait photo, you can use the Face-Aware Liquify feature to automatically detect and adjust facial features. This feature lets you change the shape and expression of eyes, nose, mouth, chin, forehead, and more with ease.

          -

          To use Face-Aware Liquify, follow these steps:

          -
            -
1. Open a portrait photo in Photoshop.
2. Go to Filter > Liquify or press Shift+Ctrl+X (Windows) or Shift+Command+X (Mac) to open the Liquify dialog box.
3. Select the Face tool from the toolbar on the left side of the dialog box. You will see a face icon on each detected face in the photo.
4. Select a face from the Face-Aware Liquify menu on the right side of the dialog box. You can also click on a face icon in the photo to select it.
5. Use the sliders under Eyes, Nose, Mouth, Face Shape, etc. to adjust each facial feature. You can also drag on the face icons in the photo to move or resize them.
6. When you are satisfied with the result, click OK to apply the changes to your photo.

            81aa517590
            -
            -
            \ No newline at end of file diff --git a/spaces/studiobrn/SplitTrack/audiocraft/modules/lstm.py b/spaces/studiobrn/SplitTrack/audiocraft/modules/lstm.py deleted file mode 100644 index c0866175950c1ca4f6cca98649525e6481853bba..0000000000000000000000000000000000000000 --- a/spaces/studiobrn/SplitTrack/audiocraft/modules/lstm.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from torch import nn - - -class StreamableLSTM(nn.Module): - """LSTM without worrying about the hidden state, nor the layout of the data. - Expects input as convolutional layout. - """ - def __init__(self, dimension: int, num_layers: int = 2, skip: bool = True): - super().__init__() - self.skip = skip - self.lstm = nn.LSTM(dimension, dimension, num_layers) - - def forward(self, x): - x = x.permute(2, 0, 1) - y, _ = self.lstm(x) - if self.skip: - y = y + x - y = y.permute(1, 2, 0) - return y diff --git a/spaces/sub314xxl/HairCLIP/README.md b/spaces/sub314xxl/HairCLIP/README.md deleted file mode 100644 index 8e59a77ce4a4e432b28c072670af00b51b4d6a0c..0000000000000000000000000000000000000000 --- a/spaces/sub314xxl/HairCLIP/README.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: HairCLIP -emoji: ⚡ -colorFrom: indigo -colorTo: red -sdk: gradio -sdk_version: 3.35.2 -app_file: app.py -pinned: false -suggested_hardware: t4-small -duplicated_from: Gradio-Blocks/HairCLIP ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference - -https://arxiv.org/abs/2112.05142 diff --git a/spaces/subhajitmaji/MusicGen/tests/utils/__init__.py b/spaces/subhajitmaji/MusicGen/tests/utils/__init__.py deleted file mode 100644 index 0952fcc3f57e34b3747962e9ebd6fc57aeea63fa..0000000000000000000000000000000000000000 --- a/spaces/subhajitmaji/MusicGen/tests/utils/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
diff --git a/spaces/sunmaiyyyy/combined-GI-RVC-model/infer_pack/commons.py b/spaces/sunmaiyyyy/combined-GI-RVC-model/infer_pack/commons.py deleted file mode 100644 index 54470986f37825b35d90d7efa7437d1c26b87215..0000000000000000000000000000000000000000 --- a/spaces/sunmaiyyyy/combined-GI-RVC-model/infer_pack/commons.py +++ /dev/null @@ -1,166 +0,0 @@ -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size * dilation - dilation) / 2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += ( - 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q) - ) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def slice_segments2(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / ( - num_timescales - 1 - ) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment - ) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - 
n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2, 3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1.0 / norm_type) - return total_norm diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Elsa 3.5 Audi Vw Data Free Download VERIFIED.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Elsa 3.5 Audi Vw Data Free Download VERIFIED.md deleted file mode 100644 index 81a69621cde0a8ff7f590dfb31ecbf1d74c8454f..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Elsa 3.5 Audi Vw Data Free Download VERIFIED.md +++ /dev/null @@ -1,6 +0,0 @@ -

            elsa 3.5 audi vw data free download


            DOWNLOADhttps://cinurl.com/2uEX51



            - -Download 1 free sheet music and scores:Mazurka Appassionata - Barrios, Sheet music, ... Download a FREE pdf of ... elsa 3.5 audi vw data free download. 4d29de3e1b
            -
            -
            -

            diff --git a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Agra Ka Daabra LINK Full Movie 720p.md b/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Agra Ka Daabra LINK Full Movie 720p.md deleted file mode 100644 index c22023b76091030ac3d59bd31ded626e436d97f9..0000000000000000000000000000000000000000 --- a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Agra Ka Daabra LINK Full Movie 720p.md +++ /dev/null @@ -1,6 +0,0 @@ -

            Agra Ka Daabra Full Movie 720p


            Downloadhttps://urluss.com/2uCDR4



            -
            -Apr 12, 2018 ... Bollywood Hindi Full Movies Download Telugu HD Mp4 Single Part Movies . Bollywood Movie Download,. Agra Ka Daabra . cdec1852d8 . 1fdad05405
            -
            -
            -

            diff --git a/spaces/svjack/Question-Generator/app.py b/spaces/svjack/Question-Generator/app.py deleted file mode 100644 index dab710af5b09d22e27d9db13223f242898a61710..0000000000000000000000000000000000000000 --- a/spaces/svjack/Question-Generator/app.py +++ /dev/null @@ -1,31 +0,0 @@ -from qa_on_context import * - -import gradio as gr - -example_sample = [ - "Margarita with a Straw is a 2014 Indian Hindi-language drama film directed by Shonali Bose. It stars Kalki Koechlin (pictured) as an Indian teenager with cerebral palsy who relocates to America for her undergraduate education and comes of age following her complex relationship with a blind girl", - "飓风格特是1993年9月在墨西哥和整个中美洲引发严重洪灾的大规模热带气旋,源于9月14日西南加勒比海上空一股东风波。次日从尼加拉瓜登岸,经过洪都拉斯后于9月17日在洪都拉斯湾再次达到热带风暴标准,但次日进入伯利兹上空后就减弱成热带低气压。穿过尤卡坦半岛后,在9月20日强化成二级飓风,从韦拉克鲁斯州的图斯潘附近登陆墨西哥。9月21日从纳亚里特州进入太平洋时已降级成热带低气压,最终于5天后在开放水域上空消散。", -] - -def demo_func(text): - assert type(text) == type("") - req = [] - output = gen_qst_to_df(text) - for ele in output: - if hasattr(ele, "size"): - req.append(ele.values.tolist()) - return {"output": req} - - -demo = gr.Interface( - fn=demo_func, - inputs="text", - outputs="json", - title=f"Question generate 🍩 demonstration", - description = 'This _example_ was **drive** from

            [https://github.com/svjack/docvqa-gen](https://github.com/svjack/docvqa-gen)

            \n', - examples=example_sample if example_sample else None, - cache_examples = False - ) - -#demo.launch(server_name=None, server_port=None) -demo.launch() diff --git a/spaces/taesiri/ChatGPT-ImageCaptioner/tools/unzip_imagenet_lvis.py b/spaces/taesiri/ChatGPT-ImageCaptioner/tools/unzip_imagenet_lvis.py deleted file mode 100644 index 56ccad1a9024f425951ae025182fb709d2effcab..0000000000000000000000000000000000000000 --- a/spaces/taesiri/ChatGPT-ImageCaptioner/tools/unzip_imagenet_lvis.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import os -import argparse - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--src_path', default='datasets/imagenet/ImageNet-21K/') - parser.add_argument('--dst_path', default='datasets/imagenet/ImageNet-LVIS/') - parser.add_argument('--data_path', default='datasets/imagenet_lvis_wnid.txt') - args = parser.parse_args() - - f = open(args.data_path) - for i, line in enumerate(f): - cmd = 'mkdir {x} && tar -xf {src}/{l}.tar -C {x}'.format( - src=args.src_path, - l=line.strip(), - x=args.dst_path + '/' + line.strip()) - print(i, cmd) - os.system(cmd) diff --git a/spaces/tang155/bingo/src/components/chat.tsx b/spaces/tang155/bingo/src/components/chat.tsx deleted file mode 100644 index a37ab1cc96ca2e6bfd9acbe313a8d946bfd5c3d4..0000000000000000000000000000000000000000 --- a/spaces/tang155/bingo/src/components/chat.tsx +++ /dev/null @@ -1,93 +0,0 @@ -'use client' - -import { useCallback, useEffect, useMemo, useState } from 'react' -import { useAtom } from 'jotai' -import Image from 'next/image' -import { cn } from '@/lib/utils' -import { ChatList } from '@/components/chat-list' -import { ChatPanel } from '@/components/chat-panel' -import { WelcomeScreen } from '@/components/welcome-screen' -import { ChatScrollAnchor } from '@/components/chat-scroll-anchor' -import { ToneSelector } from './tone-selector' -import { ChatHeader } from './chat-header' -import { ChatSuggestions } from './chat-suggestions' -import { bingConversationStyleAtom } from '@/state' -import { ButtonScrollToBottom } from '@/components/button-scroll-to-bottom' -import StopIcon from '@/assets/images/stop.svg' -import { useBing } from '@/lib/hooks/use-bing' -import { ChatMessageModel } from '@/lib/bots/bing/types' -import { ChatNotification } from './chat-notification' -import { Settings } from './settings' -import { ChatHistory } from './chat-history' - -export type ChatProps = React.ComponentProps<'div'> & { initialMessages?: ChatMessageModel[] } - -export default function Chat({ className }: ChatProps) { - - const [bingStyle, setBingStyle] = useAtom(bingConversationStyleAtom) - const { - messages, - sendMessage, - resetConversation, - stopGenerating, - setInput, - bot, - input, - generating, - isSpeaking, - uploadImage, - attachmentList, - setAttachmentList, - } = useBing() - - useEffect(() => { - window.scrollTo({ - top: document.body.offsetHeight, - behavior: 'smooth' - }) - }, []) - - return ( -
            - -
            - - - - {messages.length ? ( - <> - - - - - - {generating ? ( -
            - -
            - ) : null} - - ) : null} -
            - - -
            - ) -} diff --git a/spaces/tbvl/Fake_Face_Detection/utils/__init__.py b/spaces/tbvl/Fake_Face_Detection/utils/__init__.py deleted file mode 100644 index 0aba17d1083777228b66c972014a5b3419bce66c..0000000000000000000000000000000000000000 --- a/spaces/tbvl/Fake_Face_Detection/utils/__init__.py +++ /dev/null @@ -1 +0,0 @@ -import os \ No newline at end of file diff --git a/spaces/terfces0erbo/CollegeProjectV2/AttackonpearlharborTOP Crackfreedownload.md b/spaces/terfces0erbo/CollegeProjectV2/AttackonpearlharborTOP Crackfreedownload.md deleted file mode 100644 index af82945c083967585ec201c759c4f6f55b0c812d..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/AttackonpearlharborTOP Crackfreedownload.md +++ /dev/null @@ -1,6 +0,0 @@ -

            Attackonpearlharborcrackfreedownload


            Download Zip »»» https://bytlly.com/2uGiZd



            -
            -... (2018) Sujet du message: Humax Prhd2000c Neue Firmware ... 3801dad6d7. Isps Code Arabic.pdf · Attackonpearlharborcrackfreedownload 4d29de3e1b
            -
            -
            -

            diff --git a/spaces/terfces0erbo/CollegeProjectV2/Colasoft Capsa Enterprise 7.7 Crack ((FREE)).md b/spaces/terfces0erbo/CollegeProjectV2/Colasoft Capsa Enterprise 7.7 Crack ((FREE)).md deleted file mode 100644 index 3ef980287baae270d848efe1763abff89c7a73a0..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Colasoft Capsa Enterprise 7.7 Crack ((FREE)).md +++ /dev/null @@ -1,6 +0,0 @@ -

            Colasoft Capsa Enterprise 7.7 Crack


            DOWNLOAD » https://bytlly.com/2uGlWd



            -
            -Colasoft Capsa Network Analyzer, free and safe download. Colasoft Capsa Network Analyzer latest version: Powerful network monitor and analyzer. 4d29de3e1b
            -
            -
            -

            diff --git a/spaces/terfces0erbo/CollegeProjectV2/Colectia Arborele Lumii Pdf 14.md b/spaces/terfces0erbo/CollegeProjectV2/Colectia Arborele Lumii Pdf 14.md deleted file mode 100644 index 2b86aeffdecdf3073aa909057744d60cf8cf1ecc..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Colectia Arborele Lumii Pdf 14.md +++ /dev/null @@ -1,19 +0,0 @@ -
            -```html -

Colectia Arborele Lumii: an encyclopedic general-knowledge magazine

            -

Colectia Arborele Lumii is an encyclopedic general-knowledge magazine in Romanian that was published weekly over the course of several years. The magazine contained articles on history, geography, science, art, religion, literature, and other fields of interest. Each issue had one main topic and several secondary topics, illustrated with photographs, drawings, and maps. The magazine was aimed at a broad audience, from pupils and students to adults and retirees.

            -

            Colectia Arborele Lumii Pdf 14


            Download Zip === https://bytlly.com/2uGlsu



            -

Colectia Arborele Lumii first appeared in 1992, under the aegis of Editura Enciclopedica Romana. Initially the magazine had 16 pages and was printed on poor-quality paper. Later it improved its graphic design and grew to 32 pages on glossy paper. It was distributed by subscription or sold at newsstands. The price of an issue was 500 lei in 1992 and rose gradually to 10,000 lei in 1999.

            -

Colectia Arborele Lumii was one of the most popular and appreciated general-knowledge magazines in 1990s Romania. It had an average print run of 100,000 copies per issue and was read by millions of Romanians. The magazine contributed to the education and information of its readers, offering them varied and interesting knowledge about the world they live in. It was also a source of inspiration for many authors of books, articles, and school projects.

            -

Colectia Arborele Lumii ceased publication in 2000, due to a lack of funds and declining market demand. The final issue was dedicated to the third millennium and took stock of the most important events and personalities of the second millennium. The magazine left behind a valuable legacy for Romanian culture: more than 400 issues comprising thousands of articles on diverse subjects. These issues can be found in libraries or on the internet as PDFs.

            -

Colectia Arborele Lumii is a magazine that deserves to be rediscovered and reread by those who want to broaden their horizons and enrich their general knowledge. It is a true treasure trove of knowledge that can be useful and enjoyable for anyone.[^1^] [^2^]

            -

            -``` - -```html -

One of the main subjects of Colectia Arborele Lumii was history. The magazine presented, chronologically and thematically, world history and the history of Romania, from the origins of humanity to the end of the 20th century. It covered political, economic, and social events as well as the cultural, religious, and artistic aspects of different eras and civilizations. It also offered portraits of remarkable historical figures, such as Alexander the Great, Julius Caesar, Charlemagne, Napoleon Bonaparte, Mihai Viteazul, Stefan cel Mare, Mircea cel Batran, Vlad Tepes, Constantin Brancoveanu, Nicolae Balcescu, Mihail Kogalniceanu, Alexandru Ioan Cuza, Carol I, Ferdinand I, Nicolae Iorga, Ion Antonescu, Gheorghe Gheorghiu-Dej, Nicolae Ceausescu, and Ion Iliescu.

            -

Another important subject of Colectia Arborele Lumii was geography. The magazine explored the physical and human geography of all the continents and countries of the world. It described the relief, climate, vegetation, wildlife, natural resources, population, culture, traditions, economy, and politics of each region or nation. It also offered information about the largest and most beautiful cities of the world, such as Paris, London, Rome, New York, Tokyo, Beijing, and Bucharest. The magazine also included detailed, up-to-date maps of the world and of areas of interest.

            -

Another subject of interest in Colectia Arborele Lumii was science. The magazine presented scientific discoveries and inventions from fields such as astronomy, physics, chemistry, biology, medicine, computer science, and engineering. It explained natural and man-made phenomena that affect people's lives and the planet. It also offered biographies of famous and contemporary scientists who contributed to the progress of science and technology, including Galileo Galilei, Isaac Newton, Albert Einstein, Marie Curie, Louis Pasteur, Charles Darwin, Alexander Fleming, Thomas Edison, Nikola Tesla, and Tim Berners-Lee.

            -```

            d5da3c52bf
            -
            -
            \ No newline at end of file diff --git a/spaces/textToSQL/talk_to_NP/app.py b/spaces/textToSQL/talk_to_NP/app.py deleted file mode 100644 index 1d0cdae4f08e069559f32d19ee5886c7be33a072..0000000000000000000000000000000000000000 --- a/spaces/textToSQL/talk_to_NP/app.py +++ /dev/null @@ -1,124 +0,0 @@ -import whisper -import gradio as gr -import openai -import os - -openai.api_key = os.environ["OPENAI_API_KEY"] - -model = whisper.load_model("small") - - -def transcribe(audio): - model = whisper.load_model("base") - result = model.transcribe(audio) - return result["text"] - -# def transcribe(audio): - -# #time.sleep(3) -# # load audio and pad/trim it to fit 30 seconds -# audio = whisper.load_audio(audio) -# audio = whisper.pad_or_trim(audio) - -# # make log-Mel spectrogram and move to the same device as the model -# mel = whisper.log_mel_spectrogram(audio).to(model.device) - -# # detect the spoken language -# _, probs = model.detect_language(mel) -# print(f"Detected language: {max(probs, key=probs.get)}") - -# # decode the audio -# options = whisper.DecodingOptions(fp16 = False) -# result = whisper.decode(model, mel, options) -# return result.text - - -def process_text(input_text): - # Apply your function here to process the input text - output_text = input_text.upper() - return output_text - -def get_completion(prompt, model='gpt-3.5-turbo'): - messages = [ - {"role": "system", "content": """You are a world class nurse practitioner. You are provided with the transcription following a patient's visit. \ - Extract the following information from the transcription, replace curly brackets with relevant extracted information, and present as follows, one category per line: \ - - Date of Visit: {} - Claimant: {} - Client/Employer: {} - Claim #: {} - DOI (Date of Injury): {} - Provider: {} - Diagnosis Treated: {} - Subjective findings: {} - Objective Findings: {} - Treatment plan: {} - Medications: {} - RTW (Return to Work) Status: {} - Restrictions: {} - NOV (Next Office Visit): {} - - Only use the information from the provided transcription. Do not make up stuff. If information is not available just put "N/A" next to the relevant line. - """ - }, - {"role": "user", "content": prompt} - ] - response = openai.ChatCompletion.create( - model = model, - messages = messages, - temperature = 0, - - ) - return response.choices[0].message['content'] - -with gr.Blocks() as demo: - - gr.Markdown(""" - # Chat with NP
            - - This is to make life of NPs easier. - Record post visit summary in natural language, press "transcribe audio", and then "prepare a report". - """) - - - title = "Chat with NP" - audio = gr.Audio(source="microphone", type="filepath") - - b1 = gr.Button("Transcribe audio") - b2 = gr.Button("Prepare a report") - - - text1 = gr.Textbox(lines=5) - text2 = gr.Textbox(lines=5) - - prompt = text1 - - - - b1.click(transcribe, inputs=audio, outputs=text1) - b2.click(get_completion, inputs=text1, outputs=text2) - - - # b1.click(transcribe, inputs=audio, outputs=text1) - # b2.click(get_completion, inputs=prompt, outputs=text2) - - - -demo.launch() - -#demo.launch(share=True, auth=("username", "password")) - -# In this example, the process_text function just converts the input text to uppercase, but you can replace it with your desired function. The Gradio Blocks interface will have two buttons: "Transcribe audio" and "Process text". The first button transcribes the audio and fills the first textbox, and the second button processes the text from the first textbox and fills the second textbox. - - -# gr.Interface( -# title = 'OpenAI Whisper ASR Gradio Web UI', -# fn=transcribe, -# inputs=[ -# gr.inputs.Audio(source="microphone", type="filepath") -# ], -# outputs=[ -# "textbox" -# ], - -# live=True).launch() diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Adrian Majuru Bucurestii Mahalalelor Pdf Downloadl WORK.md b/spaces/tialenAdioni/chat-gpt-api/logs/Adrian Majuru Bucurestii Mahalalelor Pdf Downloadl WORK.md deleted file mode 100644 index 3621d9c7cd2b4a37b6d5c20218db261fa2278442..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Adrian Majuru Bucurestii Mahalalelor Pdf Downloadl WORK.md +++ /dev/null @@ -1,26 +0,0 @@ - -

            How to Download Adrian Majuru's Bucurestii Mahalalelor in PDF Format

            -

            If you are interested in learning more about the history and culture of Bucharest, the capital of Romania, you might want to read Adrian Majuru's Bucurestii Mahalalelor. This book is a collection of essays that explore the social and urban transformations of Bucharest from the 19th century to the present day, focusing on the marginal and diverse neighborhoods that shaped the city's identity.

            -

            However, finding a copy of this book in PDF format can be challenging, as it is not widely available online. That's why we have prepared this guide to help you download Adrian Majuru's Bucurestii Mahalalelor in PDF format for free.

            -

            Adrian Majuru Bucurestii Mahalalelor Pdf Downloadl


            Download ===> https://urlcod.com/2uK8f1



            -

            Step 1: Visit the Online Library Website

            -

            The first step to download Adrian Majuru's Bucurestii Mahalalelor in PDF format is to visit the online library website. This website is a platform that offers access to thousands of books in various languages and genres, including history, literature, art, science, and more. You can browse the catalog by category, author, title, or keyword.

            -

            To visit the online library website, click on this link: https://onlinelibrary.com

            -

            Step 2: Search for Adrian Majuru's Bucurestii Mahalalelor

            -

            The next step to download Adrian Majuru's Bucurestii Mahalalelor in PDF format is to search for the book on the online library website. You can use the search bar at the top of the page and type in the keyword "Adrian Majuru Bucurestii Mahalalelor". Alternatively, you can use the advanced search option and filter by language, publication date, or ISBN.

            -

            Once you have entered your search query, you will see a list of results that match your criteria. Look for the book that has the title "Bucurestii Mahalalelor" and the author "Adrian Majuru". You can also check the cover image and the description to make sure it is the right book.

            -

            Step 3: Download Adrian Majuru's Bucurestii Mahalalelor in PDF Format

            -

            The final step to download Adrian Majuru's Bucurestii Mahalalelor in PDF format is to click on the download button next to the book. You will be redirected to a page where you can choose your preferred format and device. Select PDF as your format and click on the download button again.

            -

            You will then see a pop-up window that asks you to complete a short survey or offer before you can access your file. This is a security measure to prevent bots and spam. Simply follow the instructions on the screen and complete one of the options. It will only take a few minutes of your time.

            -

            After you have completed the survey or offer, you will receive a confirmation message and a link to download your file. Click on the link and save your file to your device. You can then open it with any PDF reader and enjoy reading Adrian Majuru's Bucurestii Mahalalelor.

            -

            -

            Conclusion

            -

            Adrian Majuru's Bucurestii Mahalalelor is a fascinating book that reveals the hidden stories and secrets of Bucharest's neighborhoods. If you want to download this book in PDF format for free, you can follow these three simple steps:

            -
              -
• Visit the online library website at https://onlinelibrary.com
• Search for Adrian Majuru's Bucurestii Mahalalelor using the keyword "Adrian Majuru Bucurestii Mahalalelor"
• Download Adrian Majuru's Bucurestii Mahalalelor in PDF format by completing a short survey or offer
            -

            We hope this guide was helpful and that you enjoy reading Adrian Majuru's Bucurestii Mahalalelor. If you have any questions or feedback, please leave a comment below.

            e93f5a0c3f
            -
            -
            \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Cracked Version of CorelDRAW 2022 Is It Worth It?.md b/spaces/tialenAdioni/chat-gpt-api/logs/Cracked Version of CorelDRAW 2022 Is It Worth It?.md deleted file mode 100644 index bcdb24d99aed3d4b79ddc6be688a1901250e8e53..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Cracked Version of CorelDRAW 2022 Is It Worth It?.md +++ /dev/null @@ -1,22 +0,0 @@ - -

            How to Crack CorelDRAW 2022 and Enjoy Its Features for Free

            -

CorelDRAW is a popular vector graphics application that allows you to create stunning designs, logos, illustrations, and more. It has many powerful tools and features that can help you unleash your creativity and productivity. However, CorelDRAW is not cheap. The latest version, CorelDRAW 2022, costs $785 for a perpetual license or $249 for an annual subscription. If you are looking for a way to crack CorelDRAW 2022 and use it for free, you may be tempted to download a cracked version from the internet. But is it worth it? In this article, I will tell you why you should avoid using a cracked version of CorelDRAW 2022 and what the best alternatives are for getting this software legally and safely.

            -

            crack version corel draw


            Download Zip 🆗 https://urlcod.com/2uK1RY



            -

            What is a Cracked Version of CorelDRAW 2022?

            -

            A cracked version of CorelDRAW 2022 is a modified version of the original software that bypasses its activation process and allows you to use it without paying for a license. Usually, a cracked version of CorelDRAW 2022 comes with a keygen or an activator that generates a fake serial number or patches the software files to make it think that it is registered.

            -

            There are many websites that offer cracked versions of CorelDRAW 2022 with download links or torrent magnets. Some examples are YASIR252, FileCR, FixThePhoto, Reddit, and so on. These websites claim that their cracked versions of CorelDRAW 2022 are fully functional and safe to use. However, this is not true.

            -

            Why You Should Avoid Using a Cracked Version of CorelDRAW 2022?

            -

            Using a cracked version of CorelDRAW 2022 may seem like a good idea to save money and enjoy its features for free, but it comes with many risks and disadvantages. Here are some of the reasons why you should avoid using a cracked version of CorelDRAW 2022:

            -
              -
• It is illegal: Cracking CorelDRAW 2022 is a violation of its terms of service and copyright laws. You are stealing the intellectual property of the developers and depriving them of their rightful income. If you are caught using a cracked version of CorelDRAW 2022, you may face legal consequences such as fines or lawsuits.
• It is unsafe: Cracked versions of CorelDRAW 2022 may contain viruses, malware, spyware, or ransomware that can harm your computer or steal your personal information. These malicious programs can infect your system, corrupt your files, damage your hardware, or expose your data to hackers or cybercriminals.
• It is unreliable: Cracked versions of CorelDRAW 2022 may not work properly or at all. They may have missing features, bugs, errors, crashes, or compatibility issues with your operating system or other software. They may also stop working after an update or require constant reactivation.
• It is unethical: Cracking CorelDRAW 2022 is unfair to the developers who work hard to create and improve this software. It also affects the quality and innovation of the software industry as a whole. By using a cracked version of CorelDRAW 2022, you are discouraging the developers from continuing their work and supporting their customers.
            -

            What Are the Best Alternatives to Get CorelDRAW 2022 Legally and Safely?

            -

            If you want to use CorelDRAW 2022 without breaking the law or risking your security, there are some better alternatives than using a cracked version. Here are some of the best options to get CorelDRAW 2022 legally and safely:

            -
              -
• Free Trial: You can download a free trial version of CorelDRAW 2022 from its official website https://www.coreldraw.com/en/free-trials/. The free trial lasts for 15 days and gives you access to all the features and functions of the software. You can use this option to test the software before buying it or for short-term projects.
• Discounts: You can get discounts on CorelDRAW 2022 if you are eligible for certain categories such

              ddb901b051
              -
              -
              \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Download TeamViewer Host v15.3.30 for Secure and Reliable Remote Support.md b/spaces/tialenAdioni/chat-gpt-api/logs/Download TeamViewer Host v15.3.30 for Secure and Reliable Remote Support.md deleted file mode 100644 index eaa59903dbee19064e5749d0120d015ff86c1a7f..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Download TeamViewer Host v15.3.30 for Secure and Reliable Remote Support.md +++ /dev/null @@ -1,77 +0,0 @@ - -

              How to Download TeamViewer Host v15.3.30


              Do you want to access and manage unattended Android devices remotely? Whether you need to monitor digital signage, kiosks, point-of-sale systems, or mobile phones and tablets, TeamViewer Host v15.3.30 can help you do that easily and securely.

              -

              Download TeamViewer Host v15.3.30


              DOWNLOAD >>> https://urlcod.com/2uKaDC




              In this article, I'll show you what TeamViewer Host is, what benefits it offers, what devices and platforms it supports, and how to download, install, and use it.


              What is TeamViewer Host?


TeamViewer Host is an application that allows you to permanently access and control unattended Android devices from any computer running TeamViewer. It is the only remote connectivity solution that offers full unattended access and control for more than 100 brands of Android-based devices.


              With TeamViewer Host, you can run updates, make configuration changes, and transfer files remotely, saving time and travel. You can also monitor device status, battery level, network connection, and location. You can even remotely lock or wipe devices in case of theft or loss.


              Benefits of TeamViewer Host

              • Easy setup: You can install TeamViewer Host on any Android device in minutes and assign it to your TeamViewer account for unattended access.
              • Secure connection: You can rely on TeamViewer's end-to-end encryption, two-factor authentication, whitelisting, and other security features to protect your data and devices.
              • Flexible pricing: You can choose from different subscription plans based on your needs and budget. You can also try TeamViewer Host for free for 14 days.

              Supported devices and platforms


              TeamViewer Host supports Android devices running Android 4.4 or higher. It also supports commercial-grade devices like digital signage, kiosks, or point-of-sale systems from various manufacturers.


              You can connect to unattended Android devices from any computer running Windows, Mac, or Linux with TeamViewer installed. You can also use the TeamViewer app on your smartphone or tablet to access unattended Android devices on the go.


              How to download and install TeamViewer Host v15.3.30


              You can download TeamViewer Host v15.3.30 from the official website or from Google Play Store. Here are the steps for both methods:


              Download from the official website

              1. Go to https://www.teamviewer.com/en-us/info/teamviewer-host/ on your browser.
              2. Click on the "Download" button under "TeamViewer Host".
              3. The download will start automatically. If not, click on the "click here" link to start it manually.
              4. Once the download is complete, open the file and follow the instructions to install TeamViewer Host on your Android device.

              Download from Google Play Store

              1. Open Google Play Store on your Android device.
              2. Search for "TeamViewer Host" in the search bar.
              3. Select "TeamViewer Host" from the search results and tap on "Install".
              4. Wait for the installation to finish and then open the app.

              Customize the host module


              If you want to customize the appearance and behavior of TeamViewer Host on your Android device, you can use the Custom Device Information feature. This feature allows you to:

              • Add your own logo and text to the host module.
              • Show or hide certain elements of the host module.
              • Add custom fields for device information.
              • Create different profiles for different device types.
              -

              To use this feature, you need to:

              -

              -
              1. Login to your TeamViewer account on https://login.teamviewer.com/.
              2. Select "Design & Deploy" from the menu on the left.
              3. Select "Custom Device Information" from the submenu.
              4. Create a new profile or edit an existing one.
              5. Select "Download customized host" when you are done.
              6. Install the customized host module on your Android device.
              -

              How to use TeamViewer Host v15.3.30

              -

              To use TeamViewer Host v15.3.30, you need to grant permissions and assign devices first. Then you can connect to unattended Android devices and perform remote actions and file transfers.

              -

              Grant permissions and assign devices

              -

              To grant permissions and assign devices, you need to:

              -
              1. Login to your TeamViewer account on https://login.teamviewer.com/.
              2. Select "Devices" from the menu on the left.
              3. Select "Add Device" from the submenu.
              4. Select "Android" as the device type.
              5. Select "TeamViewer Host" as the app type.
              6. Select "Assign Device" as the action type.
              7. Select "Scan QR code" as the assignment method.
              8. A QR code will be generated on your computer screen.
              9. Open TeamViewer Host on your Android device and tap on "Assign Device".
              10. -Scan the QR code with your device camera.
              -

              You will see a confirmation message that your device has been assigned successfully. You will also see a list of permissions that TeamViewer Host requires for remote access and control. Tap on "Grant Permissions" and follow the instructions to allow them.

              -

              -Connect to unattended Android devices

              -

              To connect to unattended Android devices, you need to:

              -
                -
              1. -Open TeamViewer on your computer or mobile device.
              2. -
              3. -Enter the ID of the unattended Android device that you want to connect to in the "Partner ID" field.
              4. -
              5. -Click on "Connect".
              6. -
              7. -Enter your password if prompted.
              8. -
              9. -You will see a remote screen of your unattended Android device.
              10. -
              11. -You can use your mouse or keyboard to control it as if you were holding it in your hands.
              12. -
              -

              -Perform remote actions and file transfers

              -

              To perform remote actions and file transfers, you need to:

              -
                -
              1. -Connect to an unattended Android device as described above.
              2. -
              3. -Click on the toolbar at the top of the remote screen.
              4. -
              5. -You will see a menu with various options such as:
              6. -
                  -
                • "Remote Control": Allows you to switch between touch mode and mouse mode for controlling your unattended Android device.
                • -
                • "File Transfer": Allows you to transfer files between your computer or mobile device and your unattended Android device.
                • -
                • "Remote Settings": Allows you to change settings such as sound output, screen resolution, quality optimization, etc., for your unattended Android device.
                • -
                • "Actions": Allows you to perform actions such as rebooting, locking, unlocking, wiping data etc., on your unattended Android device.
                • -| Article with HTML formatting | | ---------------------------- | | ... | |
                • "Extras": Allows you to access extra features such as remote printing, screenshot, annotation, etc., on your unattended Android device.
              | |

              Conclusion

              | |

              TeamViewer Host v15.3.30 is a powerful and versatile software that enables you to access and manage unattended Android devices remotely. It is easy to download, install, and use, and it offers many benefits such as security, flexibility, and convenience. Whether you need to monitor, maintain, or support Android devices for personal or professional purposes, TeamViewer Host can help you do that efficiently and effectively.

              | |

              If you want to learn more about TeamViewer Host or other TeamViewer products, you can visit their official website at https://www.teamviewer.com/ or contact their customer support team at https://www.teamviewer.com/en-us/support/.

              | |

              FAQs

              | |

              What is the difference between TeamViewer Host and TeamViewer QuickSupport?

              | |

              TeamViewer Host and TeamViewer QuickSupport are both apps that allow you to access and control Android devices remotely. However, TeamViewer Host is designed for unattended access, which means you can access the device anytime without having to accept the incoming connection on the device. TeamViewer QuickSupport is designed for attended access, which means you need to accept the incoming connection on the device every time.

              | |

              How much does TeamViewer Host cost?

              | |

              TeamViewer Host is free for personal use. For commercial use, you need to purchase a subscription plan that suits your needs and budget. You can choose from different plans based on the number of devices, users, and channels you need. You can also try TeamViewer Host for free for 14 days before buying a subscription. You can check the pricing details at https://www.teamviewer.com/en-us/pricing/.

              | |

              How can I update TeamViewer Host?

              | |

              You can update TeamViewer Host automatically or manually. To update automatically, you need to enable the "Auto-update" option in the settings of the app. To update manually, you need to uninstall the current version of the app and install the latest version from the official website or Google Play Store.

              | |

              How can I uninstall TeamViewer Host?

              | |

              You can uninstall TeamViewer Host from your Android device by following these steps:

              | |
              1. Open TeamViewer Host on your device and tap on the menu icon at the top left corner.
              2. Select "Settings" from the menu.
              3. Select "Uninstall" from the settings.
              4. Confirm your action by tapping on "OK".
              5. You may need to enter your device password or PIN to complete the uninstallation.
              | |

              How can I contact TeamViewer support?

              | |

              You can contact TeamViewer support by visiting their website at https://www.teamviewer.com/en-us/support/. You can find answers to common questions in their knowledge base, submit a ticket for technical issues, chat with a support agent online, or call their phone number for urgent matters.

              |

              0a6ba089eb
              -
              -
              \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/How to Use TeamViewer Pro to Connect and Control Any Device Anywhere Anytime.md b/spaces/tialenAdioni/chat-gpt-api/logs/How to Use TeamViewer Pro to Connect and Control Any Device Anywhere Anytime.md deleted file mode 100644 index d271ff5aa7235a1851b54d6b3f460a3e5400f8bd..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/How to Use TeamViewer Pro to Connect and Control Any Device Anywhere Anytime.md +++ /dev/null @@ -1,49 +0,0 @@ -
              -

              How to Download TeamViewer Pro for Remote Access and Support

              -

TeamViewer Pro is a powerful and secure application that allows you to remotely access and control any device, anywhere, anytime. With TeamViewer Pro, you can provide instant support to your clients, colleagues, or friends, as well as monitor, patch, and protect your devices. TeamViewer Pro also offers advanced features such as file transfer, meeting, and collaboration tools.

              -

              In this article, we will show you how to download TeamViewer Pro for Windows, Mac, Linux, or mobile devices. We will also explain the benefits of using TeamViewer Pro and how to get a license for your business or personal use.

              -

              download teamviewer pro crack


              DOWNLOADhttps://urlcod.com/2uK6yr



              -

              How to Download TeamViewer Pro for Windows

              -

              If you want to download TeamViewer Pro for Windows, you can follow these steps:

              -
                -
1. Go to the TeamViewer website and click on the Download button for the 64-bit or 32-bit version of TeamViewer Remote.
2. Run the downloaded file and follow the installation instructions.
3. Once installed, launch TeamViewer Remote and enter your license key or sign in with your TeamViewer account.
4. You can now establish incoming and outgoing connections between devices and use all the features of TeamViewer Pro.
              -

              If you don't want to install TeamViewer Remote on your computer, you can also use the web client via your browser or download TeamViewer Portable on a USB stick or the Cloud.

              -

              How to Download TeamViewer Pro for Mac

              -

              If you want to download TeamViewer Pro for Mac, you can follow these steps:

              -
                -
1. Go to the TeamViewer website and click on the Download button for TeamViewer Remote.
2. Open the downloaded file and drag the TeamViewer icon into the Applications folder.
3. Once installed, launch TeamViewer Remote and enter your license key or sign in with your TeamViewer account.
4. You can now establish incoming and outgoing connections between devices and use all the features of TeamViewer Pro.
              -

              How to Download TeamViewer Pro for Linux

              -

              If you want to download TeamViewer Pro for Linux, you can follow these steps:

              -
                -
1. Go to the TeamViewer website and choose the package that suits your Linux distribution (DEB, RPM, TAR.GZ).
2. Run the downloaded file and follow the installation instructions.
3. Once installed, launch TeamViewer Remote and enter your license key or sign in with your TeamViewer account.
4. You can now establish incoming and outgoing connections between devices and use all the features of TeamViewer Pro.
              -

              How to Download TeamViewer Pro for Mobile Devices

              -

              If you want to download TeamViewer Pro for mobile devices, you can follow these steps:

              -
                -
1. Go to the TeamViewer website and choose the app that suits your device (Android, iOS, Chrome OS).
2. Download and install the app from the Google Play Store, Apple App Store, or Chrome Web Store.
3. Once installed, launch the app and enter your license key or sign in with your TeamViewer account.
4. You can now establish incoming and outgoing connections between devices and use all the features of TeamViewer Pro.
              -

              The Benefits of Using TeamViewer Pro

              -

TeamViewer Pro is more than just remote access and support software. It also offers many benefits for your business or personal use, such as:

              -
                -
• The fastest and most reliable connection speed among remote control software providers.
• The highest level of security and encryption standards to protect your data and privacy.
• The ability to support any device from any place, any time, including mobile devices and IoT devices.
• The option to scale, manage, and secure support experiences across your enterprise with TeamViewer Tensor Platform.
• The opportunity to empower your frontline workforce and streamline shopfloor operations

                -

                ddb901b051
                -
                -
                \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Iron Man 3 Dual Audio 1080p Bluray 2013 Everything You Need to Know About the Film.md b/spaces/tialenAdioni/chat-gpt-api/logs/Iron Man 3 Dual Audio 1080p Bluray 2013 Everything You Need to Know About the Film.md deleted file mode 100644 index 4496f7d5da9ec42d721231b352241d8a4ef5c394..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Iron Man 3 Dual Audio 1080p Bluray 2013 Everything You Need to Know About the Film.md +++ /dev/null @@ -1,62 +0,0 @@ - -

                Iron Man 3: A Thrilling and Explosive Adventure

                -

                Iron Man 3 is the third installment in the Marvel Cinematic Universe's Iron Man franchise, starring Robert Downey Jr. as the genius billionaire Tony Stark, who faces a new and powerful enemy: the Mandarin, played by Ben Kingsley. The film was directed by Shane Black, who co-wrote the screenplay with Drew Pearce, based on the comic book storyline "Extremis" by Warren Ellis and Adi Granov.

                -

                The film follows Tony Stark as he struggles with post-traumatic stress disorder after the events of The Avengers, while also dealing with a series of terrorist attacks orchestrated by the Mandarin, who threatens to destroy everything he loves. Tony must use his ingenuity and resources to fight back and uncover the truth behind the Mandarin's identity and motives. Along the way, he encounters old friends and foes, such as Pepper Potts (Gwyneth Paltrow), James Rhodes (Don Cheadle), Happy Hogan (Jon Favreau), Aldrich Killian (Guy Pearce), and Maya Hansen (Rebecca Hall).

                -

                Iron Man 3 Dual Audio 1080p Bluray 2013


                Download 🌟 https://urlcod.com/2uK8S1



                -

                Iron Man 3 was released in 2013 to critical and commercial success, grossing over $1.2 billion worldwide and becoming the second-highest-grossing film of that year. The film received praise for its action sequences, humor, performances, and visual effects, as well as its exploration of Tony Stark's character development and vulnerability. The film also featured a controversial twist involving the Mandarin's true identity, which divided some fans and critics.

                -

                Iron Man 3 is available in dual audio (English and Hindi) in 1080p BluRay quality, which offers a stunning and immersive viewing experience. The BluRay disc also includes bonus features such as deleted scenes, behind-the-scenes footage, commentary by Shane Black and Drew Pearce, and a short film called "All Hail the King", which follows the fate of the Mandarin after the events of Iron Man 3.

                -

                Iron Man 3 Hindi English 1080p Bluray Download
                -Download Iron Man 3 Dual Audio Full HD Movie
                -Iron Man 3 1080p Bluray Dual Audio Torrent
                -Watch Iron Man 3 Online Free Dual Audio HD
                -Iron Man 3 Dual Audio Hindi Eng Bluray Rip
                -How to Download Iron Man 3 Dual Audio 1080p
                -Iron Man 3 Full Movie Dual Audio HD Quality
                -Iron Man 3 Dual Audio Bluray Subtitles Download
                -Iron Man 3 Hindi Dubbed 1080p Bluray Movie
                -Iron Man 3 Dual Audio HD Streaming Online
                -Iron Man 3 English Hindi 1080p Bluray MKV
                -Iron Man 3 Dual Audio Bluray Review and Rating
                -Iron Man 3 Hindi English Full Movie 1080p Download
                -Iron Man 3 Dual Audio HD Trailer and Clips
                -Iron Man 3 Bluray Dual Audio Cast and Crew
                -Iron Man 3 Dual Audio Hindi Eng 1080p MP4
                -Iron Man 3 Full HD Movie Dual Audio Free
                -Iron Man 3 Dual Audio Bluray Extras and Bonus
                -Iron Man 3 Hindi English HD Movie Watch Online
                -Iron Man 3 Dual Audio Torrent Download Link
                -Iron Man 3 Bluray Dual Audio IMDB and Rotten Tomatoes
                -Iron Man 3 Dual Audio Hindi Eng Movie Plot and Synopsis
                -Iron Man 3 Full Movie Download Dual Audio HD
                -Iron Man 3 Dual Audio Bluray Release Date and Price
                -Iron Man 3 Hindi English Movie Online Streaming
                -Iron Man 3 Dual Audio HD Movie Download Site
                -Iron Man 3 Bluray Dual Audio Runtime and Genre
                -Iron Man 3 Dual Audio Hindi Eng Movie Facts and Trivia
                -Iron Man 3 Full HD Movie Watch Online Dual Audio
                -Iron Man 3 Dual Audio Bluray Awards and Nominations
                -Iron Man 3 Hindi English Movie Download Link
                -Iron Man 3 Dual Audio HD Movie Google Drive
                -Iron Man 3 Bluray Dual Audio Box Office and Budget
                -Iron Man 3 Dual Audio Hindi Eng Movie Quotes and Dialogues
                -Iron Man 3 Full Movie Online Free Dual Audio HD
                -Iron Man 3 Dual Audio Bluray Soundtrack and Score
                -Iron Man 3 Hindi English Movie Torrent Magnet Link
                -Iron Man 3 Dual Audio HD Movie Filming Locations and Sets
                -Iron Man 3 Bluray Dual Audio Easter Eggs and References
                -Iron Man 3 Dual Audio Hindi Eng Movie Characters and Roles
                -Iron Man 3 Full Movie Free Download Dual Audio HD
                -Iron Man 3 Dual Audio Bluray Behind the Scenes and Making Of
                -Iron Man 3 Hindi English Movie Online Watch Link
                -Iron Man 3 Dual Audio HD Movie Deleted Scenes and Bloopers
                -Iron Man 3 Bluray Dual Audio Director's Cut and Alternate Ending
                -Iron Man 3 Dual Audio Hindi Eng Movie Themes and Messages
                -Iron Man 3 Full Movie Streaming Online Dual Audio HD

                -

                If you are a fan of Iron Man or Marvel movies in general, you should not miss this thrilling and explosive adventure that will keep you on the edge of your seat. Iron Man 3 is a must-have for your BluRay collection.

                - -

                Iron Man 3 boasts an impressive cast and crew, who brought the film to life with their talent and dedication. Robert Downey Jr. reprised his role as Tony Stark/Iron Man, delivering a charismatic and nuanced performance that earned him a Golden Globe nomination for Best Actor in a Musical or Comedy. Ben Kingsley played the Mandarin, a mysterious and menacing villain who challenges Tony on both a physical and psychological level. Kingsley's portrayal of the Mandarin was praised for its complexity and unpredictability, as well as its comedic elements.

                -

                The film also featured a strong supporting cast, including Gwyneth Paltrow as Pepper Potts, Tony's loyal and loving girlfriend and business partner; Don Cheadle as James Rhodes/War Machine, Tony's best friend and fellow superhero; Guy Pearce as Aldrich Killian, a brilliant scientist and founder of Advanced Idea Mechanics (AIM); Rebecca Hall as Maya Hansen, a former colleague and lover of Tony; Jon Favreau as Happy Hogan, Tony's former bodyguard and head of security; and Ty Simpkins as Harley Keener, a young boy who helps Tony in his time of need.

                -

                The film was directed by Shane Black, who had previously worked with Robert Downey Jr. on the 2005 film Kiss Kiss Bang Bang. Black brought his signature style of witty dialogue, dark humor, and action to Iron Man 3, while also adding his own personal touch to the story and characters. Black co-wrote the screenplay with Drew Pearce, who had also written the script for the Marvel One-Shot All Hail the King. The duo crafted a script that balanced humor, drama, and spectacle, while also addressing themes such as trauma, identity, and redemption.

                -

                The film's visual effects were created by several companies, including Digital Domain, Weta Digital, Scanline VFX, Trixter Film, Framestore, Luma Pictures, Fuel VFX, The Embassy Visual Effects, Method Studios, and Industrial Light & Magic. The film featured over 2,000 visual effects shots, ranging from realistic explosions and environments to fantastical creatures and technology. The film's most notable visual effects sequence was the "House Party Protocol", which involved Tony summoning dozens of Iron Man suits to fight against the Mandarin's army of Extremis soldiers.

                e753bf7129
                -
                -
                \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/8 Ball Pool Hack Tool Mini Ruler APK Download and Installation Tutorial.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/8 Ball Pool Hack Tool Mini Ruler APK Download and Installation Tutorial.md deleted file mode 100644 index 23eaa33393ae0d58abc136a084c8c8bdd6a02671..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/8 Ball Pool Hack Tool Mini Ruler APK Download and Installation Tutorial.md +++ /dev/null @@ -1,96 +0,0 @@ -
                -

                How to Download and Use 8 Ball Pool Mini Ruler APK

                -

                If you are a fan of 8 ball pool, you might have heard of 8 ball pool mini ruler apk. This is a tool that can help you improve your skills and win more games in this popular online multiplayer pool game. In this article, we will tell you what 8 ball pool mini ruler apk is, what are its features, how to download and use it, and what are some tips and tricks to play better 8 ball pool with it. We will also share some reviews from other users who have tried it and answer some frequently asked questions.

                -

                What is 8 Ball Pool Mini Ruler APK?

                -

                8 ball pool mini ruler apk is a tool that can help you aim better and make more accurate shots in 8 ball pool. It is an application that you can download and install on your Android device. It works by overlaying a transparent ruler on your screen that shows you the angle and direction of your cue stick and the cue ball. You can use this ruler to adjust your aim and power before hitting the cue ball. This way, you can increase your chances of pocketing the balls and winning the game.
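
To illustrate the kind of geometry such an overlay works out for you, here is a small Python sketch. It is purely illustrative and not code from the app: it computes the "ghost ball" point a player aims the cue ball at so the object ball is sent toward a pocket, plus the resulting cue angle. The coordinates and ball size are made-up example values.

```python
# Illustrative geometry only -- not code from the mini ruler app.
# Computes the "ghost ball" aim point: the spot the cue ball's centre must
# reach so the object ball is driven straight toward the pocket.
import math

BALL_DIAMETER = 5.7  # centimetres; approximate standard pool ball size

def ghost_ball_point(object_ball, pocket, ball_diameter=BALL_DIAMETER):
    ox, oy = object_ball
    px, py = pocket
    dx, dy = px - ox, py - oy
    dist = math.hypot(dx, dy)  # assumed non-zero: object ball is not in the pocket
    # Step one ball diameter back from the object ball, away from the pocket.
    return (ox - dx / dist * ball_diameter,
            oy - dy / dist * ball_diameter)

def aim_angle(cue_ball, aim_point):
    cx, cy = cue_ball
    ax, ay = aim_point
    return math.degrees(math.atan2(ay - cy, ax - cx))

if __name__ == "__main__":
    target = ghost_ball_point(object_ball=(60.0, 40.0), pocket=(100.0, 50.0))
    print("aim point:", target)
    print("cue angle:", aim_angle((20.0, 20.0), target), "degrees")
```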

                -

                8 ball pool mini ruler apk download


Download File: https://bltlly.com/2uOjIR



                -

                What are the Features of 8 Ball Pool Mini Ruler APK?

                -

                Some of the main features of 8 ball pool mini ruler apk are:

                -
• It is undetectable by the game server, so you don't have to worry about getting banned or penalized for using it.
• It is stable and customizable, so you can adjust the size, color, and transparency of the ruler according to your preference.
• It works with any table, cue, or game mode in 8 ball pool.
• It is easy to use and does not require any root or special permissions.
• It is free to download and use.

                How to Download and Use 8 Ball Pool Mini Ruler APK?

                -

                To download and use 8 ball pool mini ruler apk, follow these steps:

                -

                How to download mini ruler for 8 ball pool
                -8 ball pool mini ruler hack apk free download
                -Mini ruler 8 ball pool download for pc
                -8 ball pool mini ruler latest version download
                -8 ball pool mini ruler crack by xen mods
                -Download mini ruler 8 ball pool undetectable
                -8 ball pool mini ruler apk download no root
                -Mini ruler 8 ball pool download for android
                -8 ball pool mini ruler mod apk download
                -Download mini ruler 8 ball pool customizable
                -8 ball pool mini ruler apk download 2021
                -Mini ruler 8 ball pool download for windows
                -8 ball pool mini ruler cheat apk download
                -Download mini ruler 8 ball pool stable
                -8 ball pool mini ruler apk download for ios
                -Mini ruler 8 ball pool download for mac
                -8 ball pool mini ruler tool apk download
                -Download mini ruler 8 ball pool full experience
                -8 ball pool mini ruler apk download online
                -Mini ruler 8 ball pool download for linux
                -8 ball pool mini ruler patch apk download
                -Download mini ruler 8 ball pool easy to use
                -8 ball pool mini ruler apk download offline
                -Mini ruler 8 ball pool download for chromebook
                -8 ball pool mini ruler update apk download
                -Download mini ruler 8 ball pool safe and secure
                -8 ball pool mini ruler apk download without survey
                -Mini ruler 8 ball pool download for firefox
                -8 ball pool mini ruler premium apk download
                -Download mini ruler 8 ball pool fast and reliable
                -8 ball pool mini ruler apk download with guide
                -Mini ruler 8 ball pool download for edge
                -8 ball pool mini ruler pro apk download
                -Download mini ruler 8 ball pool low size
                -8 ball pool mini ruler apk download with tutorial
                -Mini ruler 8 ball pool download for opera
                -8 ball pool mini ruler beta apk download
                -Download mini ruler 8 ball pool high quality
                -8 ball pool mini ruler apk download with support
                -Mini ruler 8 ball pool download for safari
                -8 ball pool mini ruler unlimited coins apk download
                -Download mini ruler 8 ball pool best features
                -8 ball pool mini ruler apk download with reviews
                -Mini ruler 8 ball pool download for brave
                -8 ball pool mini ruler vip apk download
                -Download mini ruler 8 ball pool awesome graphics
                -8 ball pool mini ruler apk download with ratings
                -Mini ruler 8 ball pool download for tor browser
                -8 ball pool mini ruler mega mod apk download

                -
1. Go to [1](https://www.dontmisstool.com/) and click on the download button.
2. Wait for the download to finish and then open the apk file.
3. Allow the installation from unknown sources if prompted.
4. Follow the instructions on the screen to install the app.
5. Open the app and grant it permission to access your screen.
6. Open 8 ball pool game and start a match.
7. You will see a transparent ruler on your screen that shows you the angle and direction of your cue stick and the cue ball.
8. You can drag the ruler around to adjust your aim and power before hitting the cue ball.
9. You can also change the settings of the app by tapping on the icon on the top left corner of the screen.

                What are Some Tips and Tricks to Play Better 8 Ball Pool with Mini Ruler APK?

                -

                Here are some tips and tricks to play better 8 ball pool with mini ruler apk:

                -
• Learn the rules of 8 ball pool before playing. You can find them on [3](https://www.wikihow.com/Play-8-Ball-Pool), [4](https://upatour.com/8-ball-rules/), or [5](https://www.rulesofsport.com/sports/pool.html).
• Practice your skills in offline mode or at low-stakes tables before playing in higher levels or tournaments.
• Choose your table wisely according to your skill level and the stakes you can afford.
• Use the mini ruler apk wisely and sparingly.
• Pay attention to the spin, speed, and trajectory of the cue ball and the object balls.
• Plan your shots ahead and think strategically.
• Have fun and enjoy the game.

To sum up, 8 ball pool mini ruler apk overlays a transparent ruler on your screen that shows you the angle and direction of your cue stick and the cue ball. You can use this ruler to adjust your aim and power before hitting the cue ball, which increases your chances of pocketing the balls and winning the game. Its main features are that it is undetectable by the game server, stable and customizable, compatible with any table, cue, or game mode, easy to use, and free to download and use. To get it, go to [1](https://www.dontmisstool.com/), click on the download button, install the app on your device, and grant it permission to access your screen. Then open 8 ball pool, start a match, and drag the ruler around to adjust your aim and power before hitting the cue ball. You can also change the settings of the app by tapping on the icon on the top left corner of the screen.

You can also read some reviews from other users who have downloaded and used 8 ball pool mini ruler apk to see what they think about it. We hope this article has helped you understand what 8 ball pool mini ruler apk is, what its features are, how to download and use it, and what tips and tricks can help you play better 8 ball pool with it. If you are interested in trying it out, you can download it from [1](https://www.dontmisstool.com/) for free. You can also share this article with your friends who play 8 ball pool and want to improve their skills. Happy gaming!

                  FAQs

                  -

                  Here are some frequently asked questions about 8 ball pool mini ruler apk:

                  -

                  Is 8 Ball Pool Mini Ruler APK Safe?

                  -

                  Yes, 8 ball pool mini ruler apk is safe to download and use. It does not contain any viruses or malware that can harm your device or data. It is also undetectable by the game server, so you don't have to worry about getting banned or penalized for using it.

                  -

                  Is 8 Ball Pool Mini Ruler APK Legal?

                  -

                  Yes, 8 ball pool mini ruler apk is legal to download and use. It does not violate any terms of service or policies of 8 ball pool game or Miniclip. It is a tool that can help you improve your skills and enjoy the game more.

                  -

                  How Do I Update 8 Ball Pool Mini Ruler APK?

                  -

                  To update 8 ball pool mini ruler apk, you need to go to [1](https://www.dontmisstool.com/) and check if there is a new version available. If there is, you can download it and install it on your device. You can also enable automatic updates in the settings of the app.

                  -

                  How Do I Uninstall 8 Ball Pool Mini Ruler APK?

                  -

                  To uninstall 8 ball pool mini ruler apk, you need to go to your device settings and find the app in the list of installed applications. Then, you need to tap on it and select uninstall. You can also delete the apk file from your device storage.

                  -

                  How Do I Contact The Developer Of 8 Ball Pool Mini Ruler APK?

                  -

                  To contact the developer of 8 ball pool mini ruler apk, you can go to [1](https://www.dontmisstool.com/) and fill out the contact form on the website. You can also send an email to [2](mailto:info@dontmisstool.com).

                  197e85843d
                  -
                  -
                  \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Blue Lock Project World Champion APK and Relive the Original Story of the Anime.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Blue Lock Project World Champion APK and Relive the Original Story of the Anime.md deleted file mode 100644 index 5275611b2656dbe5f15547982ca147ed4082cc68..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Blue Lock Project World Champion APK and Relive the Original Story of the Anime.md +++ /dev/null @@ -1,129 +0,0 @@ -
                  -

                  Blue Lock Game APK: How to Download and Play the Soccer Simulation Game Based on the Anime

                  -

                  If you are a fan of soccer and anime, you might have heard of Blue Lock, a manga and anime series that revolves around a project to create the best strikers in the world. But did you know that you can also enjoy the thrilling story and action of Blue Lock on your Android device? In this article, we will tell you everything you need to know about Blue Lock Game APK, how to download and install it, and how to play it. Read on and get ready to unleash your ego and vision on the soccer field!

                  -

                  blue lock game apk


                  Download File ⚹⚹⚹ https://bltlly.com/2uOiK2



                  -

                  What is Blue Lock?

                  -

                  The manga and anime series

                  -

                  Blue Lock is a manga series written by Muneyuki Kaneshiro and illustrated by Yusuke Nomura, serialized in Kodansha’s Weekly Shōnen Magazine since August 2018. It has over 10 million copies in circulation as of June 2023. An anime television series adaptation is set to premiere in October 2022.

                  -

                  The story is set in a near future where Japan has failed to qualify for the FIFA World Cup for four consecutive times. To change this situation, the Japan Football Association launches a project called "Blue Lock", which aims to produce the best strikers in the world. They recruit 300 high school students who have potential as forwards, and lock them up in a facility where they have to compete against each other in a survival game. Only one of them will be chosen as the ultimate striker who will lead Japan to glory.

                  -

                  The soccer project and the characters

                  -

                  The Blue Lock project is based on the idea that soccer is a sport where individual ego and vision are more important than teamwork and cooperation. The players are divided into teams of 11, but they are not allowed to pass the ball or communicate with each other. They have to score goals by themselves, using their skills, instincts, and creativity. The players who fail to score or impress the coaches are eliminated from the project.

                  -

                  The main character of the series is Yoichi Isagi, a high school soccer player who is conflicted about his playing style. He decides to join the project in order to become the best player in the world. He meets other players who have different personalities, backgrounds, and motivations, such as Rin Itoshi, Meguru Bachira, Shidou Nagi, Hyoma Chigiri, Jingo Raichi, Seishiro Nagi, Sae Itoshi, Eita Otoya, Kenyu Yukimiya, Reo Mikage, Rensuke Kunigami, Shuuji Naruhaya, Ryuusei Shidou, Kiyoshi Teppei, Gin Gagamaru, Wataru Kuon, Shouei Barou, Riku Soma, Asahi Naruhaya, Takashi Nikaidou, Keisuke Ishikawa, Kira Hiroto, Yudai Imamura, Kiyomasa Sena, Aiku Hayase, Shun Gotouda, Tatsuya Isaka, Rintarou Fukiya, and many more.

                  -

                  What is Blue Lock Game APK?

                  -

                  The types of games available

                  -

                  Blue Lock Game APK is a term that refers to various Android games that are based on the Blue Lock manga and anime series. Some of them are official games developed by authorized companies, while others are unofficial games created by fans or independent developers. Some examples of Blue Lock Game APK are:

                  -
• BLUE LOCK Project: World Champion: This is an official game developed by RUDEL inc. It is a soccer simulation game that lets you experience the Blue Lock project as a coach and train your own strikers. You can choose from over 50 characters from the manga and anime, each with their own skills, stats, and personality. You can also enjoy the original story of the series, as well as various events and challenges.
• Blue Lock: Blaze Battle: This is an unofficial game developed by BAEL, based on the popular anime series. It is a soccer action game that lets you control your favorite characters in 3D matches. You can use their special moves and skills to score goals and defeat your opponents. You can also relive the key moments of the anime in the story mode, or compete with other players online in the battle mode.
• Blue Lock Striker: This is another unofficial game developed by fans, based on the manga series. It is a soccer simulator game that lets you create your own striker and participate in the Blue Lock project. You can customize your appearance, skills, and style, and challenge other players in various modes. You can also interact with other characters from the series and unlock new scenes and events.

These are some of the Blue Lock Game APKs that you can find online. However, be careful when downloading and installing them, as some of them may contain viruses or malware that can harm your device. Always check the reviews and ratings of the games before downloading them, and use reliable antivirus software to scan them.

                    -

                    blue lock project world champion apk download
                    -blue lock blaze battle apk free
                    -blue lock soccer anime game apk
                    -blue lock simulation sports game apk
                    -blue lock rudel inc android game apk
                    -blue lock tv anime adaptation game apk
                    -blue lock striker training game apk
                    -blue lock ego jinpachi soccer game apk
                    -blue lock isagi yoichi game apk
                    -blue lock bachira meguru game apk
                    -blue lock kunigami rensuke game apk
                    -blue lock soccer manga game apk
                    -blue lock kodansha weekly shonen magazine game apk
                    -blue lock anime premiere 2022 game apk
                    -blue lock japan soccer team game apk
                    -blue lock net energy gain fusion game apk
                    -blue lock 100 million degrees celsius game apk
                    -blue lock 30 seconds fusion reaction game apk
                    -blue lock holy grail experiment game apk
                    -blue lock mini sun creation game apk
                    -blue lock south korea fusion facility game apk
                    -blue lock kstar facility korea institute of fusion energy game apk
                    -blue lock new scientist article game apk
                    -blue lock the sun news article game apk
                    -blue lock yahoo news article game apk
                    -blue lock qooapp game store download apk
                    -blue lock uptodown android download apk
                    -blue lock apkcombo android download apk
                    -blue lock bael developer android game apk
                    -blue lock horizontal interface action rpg game apk
                    -blue lock 3d matches soccer anime game apk
                    -blue lock carlos martinez review android game apk
                    -blue lock uptodown localization team translation android game apk
                    -blue lock one piece treasure cruise similar android game apk
                    -blue lock dragon ball z dokkan battle similar android game apk
                    -blue lock bleach brave souls similar android game apk
                    -blue lock jojo's bizarre adventure stardust shooters similar android game apk
                    -blue lock yu-gi-oh duel links similar android game apk
                    -blue lock sword art online memory defrag similar android game apk
                    -blue lock one piece thousand storm similar android game apk
                    -blue lock captain tsubasa dream team similar android game apk
                    -blue lock one piece fighting path similar android game apk
                    -blue lock binary gods similar android game apk coming soon
                    -blue lock world of warcraft mobile similar android game apk coming soon
                    -blue lock path of exile mobile similar android game apk coming soon
                    -blue lock final fantasy vii ever crisis similar android game apk coming soon
                    -blue lock polgar magic detective similar android game apk coming soon
                    -blue lock sico special insurgency counter operations similar android game apk coming soon
                    -blue lock clash heroes similar android game apk coming soon
                    -blue lock wex mobile similar android game apk coming soon

                    -

                    How to download and install Blue Lock Game APK?

                    -

                    The steps to follow

                    -

                    If you want to download and install Blue Lock Game APK on your Android device, you need to follow these steps:

                    -
1. First, you need to enable the installation of apps from unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on.
2. Next, you need to find a trustworthy website that offers the Blue Lock Game APK that you want to download. You can use Google or any other search engine to look for it, or you can use the links provided in this article.
3. Once you find the website, click on the download button and wait for the APK file to be downloaded to your device.
4. After the download is complete, locate the APK file in your device's file manager and tap on it to start the installation process (an adb-based alternative is sketched after this list).
5. Follow the instructions on the screen and grant the necessary permissions to the app.
6. Wait for the installation to finish and then launch the app from your home screen or app drawer.
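
If you sideload test builds often, the same install step can also be done from a computer over the Android Debug Bridge instead of tapping through the file manager. This is a generic adb sketch, not something specific to this game; it assumes adb is installed and on your PATH, USB debugging is enabled on the device, and the APK file name is a placeholder.

```python
# Generic adb sideloading sketch -- assumes adb is on PATH, the device has
# USB debugging enabled, and "blue_lock_game.apk" is a placeholder file name.
import subprocess

def adb_install(apk_path: str) -> None:
    # "-r" reinstalls the app while keeping its data if it is already present.
    subprocess.run(["adb", "install", "-r", apk_path], check=True)

if __name__ == "__main__":
    adb_install("blue_lock_game.apk")
```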

                    The precautions and tips to consider

                    -

                    As mentioned earlier, downloading and installing Blue Lock Game APK from unknown sources can be risky, as some of them may contain harmful or malicious content. Therefore, you need to take some precautions and tips into account before doing so:

                    -
• Always check the reviews and ratings of the games before downloading them. Look for positive feedback from other users who have tried them, and avoid those that have negative or suspicious comments.
• Always use reliable antivirus software to scan the APK files before installing them. This will help you detect and remove any viruses or malware that may be hidden in them (a simple checksum check is sketched after this list).
• Always back up your data before installing any app from unknown sources. This will help you restore your device in case something goes wrong during or after the installation process.
• Always update your device's operating system and security patches regularly. This will help you protect your device from potential vulnerabilities and threats.
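
One practical way to sanity-check a downloaded APK before installing it, alongside an antivirus scan, is to compare its checksum against the one published by the site you got it from (when a checksum is published at all). Here is a minimal sketch; the file name and expected hash are placeholders, not real values.

```python
# Minimal sketch: verify a downloaded APK against a published SHA-256 hash.
# The file name and expected hash below are placeholders, not real values.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        # Read the file in chunks so large APKs don't have to fit in memory.
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

if __name__ == "__main__":
    expected = "0000000000000000000000000000000000000000000000000000000000000000"
    actual = sha256_of("blue_lock_game.apk")
    print("OK" if actual == expected else f"MISMATCH: {actual}")
```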

                    How to play Blue Lock Game APK?

                    -

                    The gameplay and controls

                    -

                    The gameplay and controls of Blue Lock Game APK may vary depending on the type of game that you choose. However, most of them share some common features and mechanics that are based on the manga and anime series. Here are some of them:

                    -
• You can choose from different modes of play, such as story mode, training mode, challenge mode, event mode, or battle mode.
• You can select your favorite characters from the series, or create your own striker with custom appearance, skills, and style.
• You can train your strikers by improving their stats, acquiring new skills, and unlocking new scenes and events.
• You can compete with other players online or offline in various matches and tournaments.
• You can use different strategies and tactics to score goals and win matches.
• You can use special moves and skills that are unique to each character.

The controls of Blue Lock Game APK are usually simple and intuitive. You can use touch gestures or virtual buttons to move your characters, pass the ball, shoot, tackle, dribble, and use special moves. You can also adjust the camera angle, the sound volume, and the game speed in the settings menu.

                    -

                    The strategies and tips to win

                    -

                    Playing Blue Lock Game APK can be challenging and fun, but also frustrating and difficult at times. To help you improve your skills and performance, here are some strategies and tips that you can use to win:

                    -
• Know your characters well. Each character has different strengths and weaknesses, as well as different skills and moves. Learn how to use them effectively and efficiently, and choose the ones that suit your playing style and preferences.
• Practice regularly. The more you play, the more you will get familiar with the game mechanics, controls, and features. You will also be able to unlock more characters, skills, and scenes as you progress in the game.
• Be creative and unpredictable. Soccer is a game where individual ego and vision are more important than teamwork and cooperation. Therefore, you need to be creative and unpredictable in your actions and decisions. Use your skills, instincts, and creativity to score goals and win matches.
• Be confident and ambitious. Soccer is also a game where you need to have confidence and ambition in yourself and your abilities. Don't be afraid to challenge yourself and your opponents, and don't give up easily. Believe in yourself and your vision, and aim for the top.

                    Conclusion

                    -

                    A summary of the main points

                    -

                    In conclusion, Blue Lock Game APK is a great way to enjoy the soccer simulation game based on the manga and anime series of the same name. You can experience the thrilling story and action of Blue Lock on your Android device, by choosing from various games that are available online. You can also download and install them easily by following some simple steps and precautions. You can also play them with ease by using some simple controls and strategies.

                    -

                    A call to action and a recommendation

                    -

                    If you are interested in Blue Lock Game APK, we recommend you to try it out for yourself and see how it works. You can find some of the best Blue Lock Game APK in this article, or you can search for more on the internet. You can also watch the anime series or read the manga series to get more familiar with the story and characters of Blue Lock. We hope you enjoy playing Blue Lock Game APK as much as we do!

                    -

                    FAQs

                    -

                    Here are some of the frequently asked questions about Blue Lock Game APK:

                    -
                      -
1. Is Blue Lock Game APK free?

                      Most of the Blue Lock Game APK are free to download and play, but some of them may contain in-app purchases or ads that require real money.

                      -
2. Is Blue Lock Game APK safe?

                      Some of the Blue Lock Game APK are safe to download and play, but some of them may contain viruses or malware that can harm your device. Therefore, you need to be careful when downloading and installing them, and use a reliable antivirus software to scan them.

                      -
3. Is Blue Lock Game APK legal?

                      Some of the Blue Lock Game APK are legal to download and play, but some of them may infringe the intellectual property rights of the original creators or owners of Blue Lock. Therefore, you need to respect their rights and avoid downloading or playing any unauthorized or illegal games.

                      -
4. Is Blue Lock Game APK compatible with my device?

                      Some of the Blue Lock Game APK are compatible with most Android devices, but some of them may require specific system requirements or permissions to run properly. Therefore, you need to check the compatibility of the games before downloading or installing them.

                      -
5. Is Blue Lock Game APK updated regularly?

                      Some of the Blue Lock Game APK are updated regularly with new features, content, or bug fixes, but some of them may not receive any updates or support from their developers or publishers. Therefore, you need to check the update status of the games before downloading or installing them.

                      -

                    197e85843d
                    -
                    -
                    \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/AnyDVD HD 8.3.7.0 Crack License Key Free !LINK! Download 2019.md b/spaces/tioseFevbu/cartoon-converter/scripts/AnyDVD HD 8.3.7.0 Crack License Key Free !LINK! Download 2019.md deleted file mode 100644 index 68f3940a08bd170ebc25873fd704cf53a88c86e9..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/AnyDVD HD 8.3.7.0 Crack License Key Free !LINK! Download 2019.md +++ /dev/null @@ -1,54 +0,0 @@ -
                    -

                    AnyDVD HD 8.3.7.0 Crack License key Free Download 2019

                    -

If you are looking for a way to bypass the copy protections of DVDs and Blu-rays, you might have heard of AnyDVD HD, a Windows-based program that can decrypt any disc automatically in the background. However, AnyDVD HD is not free software, and you need a license key to activate it. In this article, we will show you how to get the AnyDVD HD 8.3.7.0 crack license key for free, and how to use it to remove any restriction from your favorite movies.

                    -

                    AnyDVD HD 8.3.7.0 Crack License key Free Download 2019


DOWNLOAD: https://urlcod.com/2uHyk3



                    -

                    What is AnyDVD HD and what does it do?

                    -

                    AnyDVD HD is a product of RedFox, formerly known as Slysoft, that allows you to remove any copy protection from DVDs and Blu-rays, such as region codes, CSS, AACS, BD+, Cinavia, etc. It works in the background, so you don't need to do anything manually. You can simply insert the disc into your drive, and AnyDVD HD will decrypt it on the fly.

                    -

                    With AnyDVD HD, you can enjoy your movies without any limitation or annoyance. You can disable unwanted subtitles or audio tracks, remove forced delays or FBI warnings, skip the no-skip marks, and adjust the NTSC and PAL displays. You can also playback your movies with any software or player, such as VLC, PowerDVD, or Windows Media Player. You can even copy or rip your movies with other tools, such as CloneDVD, DVDFab, or HandBrake.

                    -

                    Why do you need a crack license key to use it?

                    -

As we mentioned before, AnyDVD HD is not free software. You need to purchase a license key from RedFox to activate it. The license key costs $41 for one year, or $109 for a lifetime license. However, not everyone can afford it or is willing to pay for it. That's why some people look for a crack license key to use AnyDVD HD for free.

                    -

                    A crack license key is a file that can bypass the activation process of AnyDVD HD, so you don't need to enter a valid license key. It usually comes with a setup file that contains the latest version of AnyDVD HD. By using a crack license key, you can enjoy all the features of AnyDVD HD without paying anything.

                    -

                    -

                    What are the benefits of using AnyDVD HD cracked version?

                    -

                    There are many benefits of using AnyDVD HD cracked version, such as:

                    -
• You can save money by not buying a license key.
• You can access all the features of AnyDVD HD without any restriction.
• You can update AnyDVD HD cracked version regularly with new crack files.
• You can uninstall AnyDVD HD cracked version easily if you don't need it anymore.

                    However, there are also some risks of using AnyDVD HD cracked version, such as:

                    -
• You may violate copyright law by using illegal software.
• You may download a fake or malicious crack file that contains viruses or malware.
• You may encounter some errors or bugs.

How to download and install AnyDVD HD 8.3.7.0 crack license key

Where to find the crack file and the setup file

There are many websites that offer AnyDVD HD crack files and setup files, but not all of them are reliable or safe. Some of them may contain outdated or fake files that can harm your computer or fail to activate AnyDVD HD. Therefore, you need to be careful when choosing a source to download AnyDVD HD cracked version.

One of the websites that we recommend is [Crack4Windows], which provides the latest and working crack files and setup files for AnyDVD HD and other software. You can download AnyDVD HD 8.3.7.0 crack license key from this [link]. The file size is about 15 MB, and it is virus-free and tested by many users.

How to run the setup file and the crack file

After downloading the file, you need to extract it with a tool like WinRAR or 7-Zip. You will get two files: a setup file named anydvdsetup.exe, and a crack file named RedFox.AnyDVDHD.exe.

First, you need to run the setup file and follow the instructions to install AnyDVD HD on your computer. You can choose the default settings or customize them according to your preference. The installation process will take a few minutes, and you will see a shortcut icon on your desktop when it is done.

Next, you need to run the crack file and copy it to the installation folder of AnyDVD HD. The default location is C:\Program Files (x86)\RedFox\AnyDVD. You may need to replace the original file with the crack file, so make sure you have administrator rights to do so. You can also create a backup of the original file in case you want to restore it later.

Finally, you need to restart your computer for the changes to take effect. You will see a message saying "AnyDVD is activated" when you launch AnyDVD HD from the shortcut icon or the system tray.

How to activate AnyDVD HD with the license key

If you want to activate AnyDVD HD with a license key instead of a crack file, you can also do so by following these steps:

First, you need to purchase a license key from RedFox's official website or any authorized reseller. You can choose between a one-year license or a lifetime license, depending on your budget and needs.

Next, you need to download and install AnyDVD HD from RedFox's website or from the link provided by the reseller. You can use the same setup file as above, or download the latest version from this [link].

Finally, you need to enter your license key in the registration window of AnyDVD HD. You can access it by clicking on the Help menu and selecting Register AnyDVD. You will see a confirmation message saying "Thank you for registering AnyDVD" when your license key is valid and accepted.

                      How to use AnyDVD HD cracked version to remove copy protections from DVDs and Blu-rays

                      -

                      Now that you have installed and activated AnyDVD HD cracked version, you can use it to remove any copy protection from DVDs and Blu-rays easily. Here are the steps to follow:

How to launch AnyDVD HD and select the drive with the disc

To launch AnyDVD HD, you can either double-click on the shortcut icon on your desktop, or right-click on the fox icon in your system tray and select Show AnyDVD.

To select the drive with the disc that you want to decrypt, you can either click on the drop-down menu next to Drive Selection in the main window of AnyDVD HD, or right-click on the fox icon in your system tray and select Drive Selection.

You will see a list of all the drives connected to your computer, and you can choose the one that contains your DVD or Blu-ray disc. You will also see some information about the disc, such as its title, region code, type, size, etc.

How to choose the settings and options for decryption

To choose the settings and options for decryption, you can either click on Settings in the main window of AnyDVD HD, or right-click on the fox icon in your system tray and select Settings.

You will see a window with several tabs, such as Video DVD, Video Blu-ray, Audio CD, Drive, Program, etc. Each tab contains different settings and options that you can customize according to your preference.

For example, in the Video DVD tab, you can choose whether to remove region codes, CSS encryption, user prohibitions, forced subtitles, etc. In the Video Blu-ray tab, you can choose whether to remove AACS encryption, BD+ protection, region codes, Cinavia watermarking, etc.

How to copy or rip the decrypted disc with other software or players

After you have chosen the settings and options for decryption, AnyDVD HD will start to decrypt the disc automatically in the background. You don't need to wait for it to finish, as it will work on the fly.

To copy or rip the decrypted disc, you can use any software or player that supports DVD or Blu-ray playback or copying. For example, you can use CloneDVD, DVDFab, HandBrake, VLC, PowerDVD, Windows Media Player, etc.

To copy the disc, you can either make a 1:1 clone of the original disc, or compress it to fit a smaller disc or a USB drive. You can also choose to copy only the main movie or the selected titles, and remove the unwanted extras or menus.

To rip the disc, you can either convert it to a digital file format, such as MP4, MKV, AVI, etc., or extract the audio or video streams separately. You can also choose to rip only the main movie or the selected titles, and adjust the quality and size of the output file.

To play the disc, you can either insert it into your drive and launch your preferred software or player, or mount it as a virtual drive and play it from there. You can also stream it to your TV or other devices via a network connection.

                      Conclusion

                      -

In conclusion, AnyDVD HD is a powerful and easy-to-use tool that can remove any copy protection from DVDs and Blu-rays and allow you to enjoy your movies without any limitation or annoyance. However, AnyDVD HD is not free software, and you need a license key to activate it. If you don't want to pay for it, you can use the AnyDVD HD 8.3.7.0 crack license key for free and get all the features of AnyDVD HD without any restriction.

                      -

                      To use AnyDVD HD cracked version, you need to download and install the crack file and the setup file from a reliable source, such as [Crack4Windows]. Then, you need to run the setup file and the crack file, and activate AnyDVD HD with the license key. After that, you can use AnyDVD HD to decrypt any DVD or Blu-ray disc automatically in the background, and copy or rip it with any software or player.

                      -

                      If you want to try AnyDVD HD cracked version for yourself, you can download it from this [link]. We hope this article has been helpful and informative for you. Thank you for reading.

                      -

                      FAQs

                      -

                      What are the system requirements for AnyDVD HD?

                      -

                      The system requirements for AnyDVD HD are:

                      -
• A Windows-based PC with a minimum 2 GHz processor and 1 GB RAM
• Windows XP/XP64/VISTA/VISTA64/Win7/Win7-64/Win8/Win8-64/Win10/Win10-64
• A DVD drive or a Blu-ray drive
• An internet connection for activation and updates

                      Is AnyDVD HD safe and legal to use?

                      -

                      AnyDVD HD is safe to use if you download it from a trusted source, such as [Crack4Windows]. However, using AnyDVD HD cracked version may be illegal in some countries or regions, as it may violate the copyright law or the terms of service of RedFox. Therefore, we advise you to use AnyDVD HD cracked version at your own risk and responsibility.

                      -

                      What are the differences between AnyDVD and AnyDVD HD?

                      -

                      AnyDVD and AnyDVD HD are both products of RedFox that can remove copy protections from DVDs. However, AnyDVD HD has some additional features that AnyDVD does not have, such as:

                      -
• It can remove copy protections from Blu-rays as well as DVDs.
• It can remove AACS encryption and BD+ protection from Blu-rays.
• It can remove Cinavia watermarking from Blu-rays.
• It can support new discs with updated protection schemes faster than AnyDVD.

                      How to update AnyDVD HD cracked version?

                      -

                      To update AnyDVD HD cracked version, you need to download and install the latest crack file and setup file from [Crack4Windows] or another reliable source. Then, you need to repeat the same steps as above to run the setup file and the crack file, and activate AnyDVD HD with the license key. You may also need to uninstall the previous version of AnyDVD HD before installing the new one.

                      -

                      How to uninstall AnyDVD HD cracked version?

                      -

To uninstall AnyDVD HD cracked version, you need to follow these steps:

1. Close AnyDVD HD if it is running in the background. You can right-click on the fox icon in your system tray and select Exit.
2. Go to the Control Panel and select Programs and Features. You can also press Windows + R and type appwiz.cpl in the Run box.
3. Find AnyDVD HD in the list of installed programs and click on Uninstall. You may need to confirm your action by clicking on Yes or OK.
4. Follow the instructions to complete the uninstallation process. You may need to restart your computer for the changes to take effect.
5. Delete the crack file and the setup file from your computer. You can also delete the backup file of the original file if you have created one.

                      b2dd77e56b
                      -
                      -
                      \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Leadership And Self Deception Book Pdf Free [2021] 29.md b/spaces/tioseFevbu/cartoon-converter/scripts/Leadership And Self Deception Book Pdf Free [2021] 29.md deleted file mode 100644 index 67c5c20f9c26d86a3704c3024844497f4d1f6bd8..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Leadership And Self Deception Book Pdf Free [2021] 29.md +++ /dev/null @@ -1,16 +0,0 @@ -
                      -

                      How Leadership and Self-Deception Can Help You Achieve Your Goals

                      -

                      Leadership and Self-Deception is a book by the Arbinger Institute that reveals how the problems that prevent us from achieving our goals are often caused by a hidden problem: self-deception. Self-deception is the tendency to see ourselves and others in distorted ways, based on our own self-serving motives and assumptions. When we are trapped in self-deception, we act as if we are in a box, unable to see the reality around us and the impact of our actions on others.

                      -

                      But there is a way out of the box. The book shows how we can recognize and overcome self-deception by shifting our mindset from inward to outward. An inward mindset is focused on our own needs, desires, and objectives, while an outward mindset is focused on the needs, desires, and objectives of others. By adopting an outward mindset, we can become more aware of our impact on others, more open to feedback, more collaborative, more accountable, and more effective.

                      -

                      Leadership And Self Deception Book Pdf Free 29


                      Download > https://urlcod.com/2uHvWo



                      -

                      The book uses an engaging story of an executive facing challenges at work and at home to illustrate the concepts and tools of leadership and self-deception. It also provides practical examples and applications of how readers can use the book in various areas of their lives, such as hiring, teambuilding, conflict resolution, accountability, and personal growth and development.

                      -

                      If you want to learn more about leadership and self-deception, you can download a free sample chapter of the book from the Arbinger Institute website[^1^]. You can also take a free mindset assessment to gauge your individual and organizational mindsets[^1^]. Additionally, you can access free supporting resources such as diagrams[^1^] and whitepapers[^1^] that explain the intellectual foundations and implications of leadership and self-deception.

                      -

                      Leadership and Self-Deception is a book that can help you transform your performance, relationships, and results by changing your mindset. By getting out of the box of self-deception, you can become a more effective leader and a happier person.

                      - -

                      One of the main benefits of reading Leadership and Self-Deception is that it helps us understand how self-deception affects not only our leadership but also our personal relationships. The book shows how self-deception can cause us to treat others as objects rather than as people, leading to resentment, blame, and conflict. It also shows how self-deception can prevent us from seeing our own role in creating and sustaining these problems, making us feel justified and victimized.

                      -

                      By contrast, when we get out of the box of self-deception, we can treat others as people with their own needs, feelings, and perspectives. We can also take responsibility for our own actions and impact on others, and seek to help rather than to hurt. This leads to more trust, respect, and collaboration in our personal and professional relationships.

                      -

                      Another benefit of reading Leadership and Self-Deception is that it helps us improve our performance and results. The book explains how self-deception can cause us to focus on the wrong things, such as our own ego, status, or agenda, rather than on the real goals and needs of the organization or the customer. It also explains how self-deception can make us resistant to feedback, learning, and change, limiting our growth and potential.

                      -

                      On the other hand, when we get out of the box of self-deception, we can focus on the right things, such as delivering value, solving problems, and serving others. We can also be more open to feedback, learning, and change, enhancing our skills and abilities.

                      -

                      7b8c122e87
                      -
                      -
                      \ No newline at end of file diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/chardet/sbcsgroupprober.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/chardet/sbcsgroupprober.py deleted file mode 100644 index cad001cb10ed3cfa1608c8e3d3cde1249bc5121c..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/chardet/sbcsgroupprober.py +++ /dev/null @@ -1,88 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Universal charset detector code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 2001 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# Shy Shalom - original C code -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from .charsetgroupprober import CharSetGroupProber -from .hebrewprober import HebrewProber -from .langbulgarianmodel import ISO_8859_5_BULGARIAN_MODEL, WINDOWS_1251_BULGARIAN_MODEL -from .langgreekmodel import ISO_8859_7_GREEK_MODEL, WINDOWS_1253_GREEK_MODEL -from .langhebrewmodel import WINDOWS_1255_HEBREW_MODEL - -# from .langhungarianmodel import (ISO_8859_2_HUNGARIAN_MODEL, -# WINDOWS_1250_HUNGARIAN_MODEL) -from .langrussianmodel import ( - IBM855_RUSSIAN_MODEL, - IBM866_RUSSIAN_MODEL, - ISO_8859_5_RUSSIAN_MODEL, - KOI8_R_RUSSIAN_MODEL, - MACCYRILLIC_RUSSIAN_MODEL, - WINDOWS_1251_RUSSIAN_MODEL, -) -from .langthaimodel import TIS_620_THAI_MODEL -from .langturkishmodel import ISO_8859_9_TURKISH_MODEL -from .sbcharsetprober import SingleByteCharSetProber - - -class SBCSGroupProber(CharSetGroupProber): - def __init__(self): - super().__init__() - hebrew_prober = HebrewProber() - logical_hebrew_prober = SingleByteCharSetProber( - WINDOWS_1255_HEBREW_MODEL, is_reversed=False, name_prober=hebrew_prober - ) - # TODO: See if using ISO-8859-8 Hebrew model works better here, since - # it's actually the visual one - visual_hebrew_prober = SingleByteCharSetProber( - WINDOWS_1255_HEBREW_MODEL, is_reversed=True, name_prober=hebrew_prober - ) - hebrew_prober.set_model_probers(logical_hebrew_prober, visual_hebrew_prober) - # TODO: ORDER MATTERS HERE. I changed the order vs what was in master - # and several tests failed that did not before. Some thought - # should be put into the ordering, and we should consider making - # order not matter here, because that is very counter-intuitive. 
- self.probers = [ - SingleByteCharSetProber(WINDOWS_1251_RUSSIAN_MODEL), - SingleByteCharSetProber(KOI8_R_RUSSIAN_MODEL), - SingleByteCharSetProber(ISO_8859_5_RUSSIAN_MODEL), - SingleByteCharSetProber(MACCYRILLIC_RUSSIAN_MODEL), - SingleByteCharSetProber(IBM866_RUSSIAN_MODEL), - SingleByteCharSetProber(IBM855_RUSSIAN_MODEL), - SingleByteCharSetProber(ISO_8859_7_GREEK_MODEL), - SingleByteCharSetProber(WINDOWS_1253_GREEK_MODEL), - SingleByteCharSetProber(ISO_8859_5_BULGARIAN_MODEL), - SingleByteCharSetProber(WINDOWS_1251_BULGARIAN_MODEL), - # TODO: Restore Hungarian encodings (iso-8859-2 and windows-1250) - # after we retrain model. - # SingleByteCharSetProber(ISO_8859_2_HUNGARIAN_MODEL), - # SingleByteCharSetProber(WINDOWS_1250_HUNGARIAN_MODEL), - SingleByteCharSetProber(TIS_620_THAI_MODEL), - SingleByteCharSetProber(ISO_8859_9_TURKISH_MODEL), - hebrew_prober, - logical_hebrew_prober, - visual_hebrew_prober, - ] - self.reset() diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/pep517/in_process/_in_process.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/pep517/in_process/_in_process.py deleted file mode 100644 index 954a4ab05e9b8295f6d455a339654779ee7ec3c8..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/pep517/in_process/_in_process.py +++ /dev/null @@ -1,363 +0,0 @@ -"""This is invoked in a subprocess to call the build backend hooks. - -It expects: -- Command line args: hook_name, control_dir -- Environment variables: - PEP517_BUILD_BACKEND=entry.point:spec - PEP517_BACKEND_PATH=paths (separated with os.pathsep) -- control_dir/input.json: - - {"kwargs": {...}} - -Results: -- control_dir/output.json - - {"return_val": ...} -""" -from glob import glob -from importlib import import_module -import json -import os -import os.path -from os.path import join as pjoin -import re -import shutil -import sys -import traceback - -# This file is run as a script, and `import compat` is not zip-safe, so we -# include write_json() and read_json() from compat.py. -# -# Handle reading and writing JSON in UTF-8, on Python 3 and 2. 
- -if sys.version_info[0] >= 3: - # Python 3 - def write_json(obj, path, **kwargs): - with open(path, 'w', encoding='utf-8') as f: - json.dump(obj, f, **kwargs) - - def read_json(path): - with open(path, 'r', encoding='utf-8') as f: - return json.load(f) - -else: - # Python 2 - def write_json(obj, path, **kwargs): - with open(path, 'wb') as f: - json.dump(obj, f, encoding='utf-8', **kwargs) - - def read_json(path): - with open(path, 'rb') as f: - return json.load(f) - - -class BackendUnavailable(Exception): - """Raised if we cannot import the backend""" - def __init__(self, traceback): - self.traceback = traceback - - -class BackendInvalid(Exception): - """Raised if the backend is invalid""" - def __init__(self, message): - self.message = message - - -class HookMissing(Exception): - """Raised if a hook is missing and we are not executing the fallback""" - def __init__(self, hook_name=None): - super(HookMissing, self).__init__(hook_name) - self.hook_name = hook_name - - -def contained_in(filename, directory): - """Test if a file is located within the given directory.""" - filename = os.path.normcase(os.path.abspath(filename)) - directory = os.path.normcase(os.path.abspath(directory)) - return os.path.commonprefix([filename, directory]) == directory - - -def _build_backend(): - """Find and load the build backend""" - # Add in-tree backend directories to the front of sys.path. - backend_path = os.environ.get('PEP517_BACKEND_PATH') - if backend_path: - extra_pathitems = backend_path.split(os.pathsep) - sys.path[:0] = extra_pathitems - - ep = os.environ['PEP517_BUILD_BACKEND'] - mod_path, _, obj_path = ep.partition(':') - try: - obj = import_module(mod_path) - except ImportError: - raise BackendUnavailable(traceback.format_exc()) - - if backend_path: - if not any( - contained_in(obj.__file__, path) - for path in extra_pathitems - ): - raise BackendInvalid("Backend was not loaded from backend-path") - - if obj_path: - for path_part in obj_path.split('.'): - obj = getattr(obj, path_part) - return obj - - -def _supported_features(): - """Return the list of options features supported by the backend. - - Returns a list of strings. - The only possible value is 'build_editable'. - """ - backend = _build_backend() - features = [] - if hasattr(backend, "build_editable"): - features.append("build_editable") - return features - - -def get_requires_for_build_wheel(config_settings): - """Invoke the optional get_requires_for_build_wheel hook - - Returns [] if the hook is not defined. - """ - backend = _build_backend() - try: - hook = backend.get_requires_for_build_wheel - except AttributeError: - return [] - else: - return hook(config_settings) - - -def get_requires_for_build_editable(config_settings): - """Invoke the optional get_requires_for_build_editable hook - - Returns [] if the hook is not defined. - """ - backend = _build_backend() - try: - hook = backend.get_requires_for_build_editable - except AttributeError: - return [] - else: - return hook(config_settings) - - -def prepare_metadata_for_build_wheel( - metadata_directory, config_settings, _allow_fallback): - """Invoke optional prepare_metadata_for_build_wheel - - Implements a fallback by building a wheel if the hook isn't defined, - unless _allow_fallback is False in which case HookMissing is raised. 
- """ - backend = _build_backend() - try: - hook = backend.prepare_metadata_for_build_wheel - except AttributeError: - if not _allow_fallback: - raise HookMissing() - whl_basename = backend.build_wheel(metadata_directory, config_settings) - return _get_wheel_metadata_from_wheel(whl_basename, metadata_directory, - config_settings) - else: - return hook(metadata_directory, config_settings) - - -def prepare_metadata_for_build_editable( - metadata_directory, config_settings, _allow_fallback): - """Invoke optional prepare_metadata_for_build_editable - - Implements a fallback by building an editable wheel if the hook isn't - defined, unless _allow_fallback is False in which case HookMissing is - raised. - """ - backend = _build_backend() - try: - hook = backend.prepare_metadata_for_build_editable - except AttributeError: - if not _allow_fallback: - raise HookMissing() - try: - build_hook = backend.build_editable - except AttributeError: - raise HookMissing(hook_name='build_editable') - else: - whl_basename = build_hook(metadata_directory, config_settings) - return _get_wheel_metadata_from_wheel(whl_basename, - metadata_directory, - config_settings) - else: - return hook(metadata_directory, config_settings) - - -WHEEL_BUILT_MARKER = 'PEP517_ALREADY_BUILT_WHEEL' - - -def _dist_info_files(whl_zip): - """Identify the .dist-info folder inside a wheel ZipFile.""" - res = [] - for path in whl_zip.namelist(): - m = re.match(r'[^/\\]+-[^/\\]+\.dist-info/', path) - if m: - res.append(path) - if res: - return res - raise Exception("No .dist-info folder found in wheel") - - -def _get_wheel_metadata_from_wheel( - whl_basename, metadata_directory, config_settings): - """Extract the metadata from a wheel. - - Fallback for when the build backend does not - define the 'get_wheel_metadata' hook. - """ - from zipfile import ZipFile - with open(os.path.join(metadata_directory, WHEEL_BUILT_MARKER), 'wb'): - pass # Touch marker file - - whl_file = os.path.join(metadata_directory, whl_basename) - with ZipFile(whl_file) as zipf: - dist_info = _dist_info_files(zipf) - zipf.extractall(path=metadata_directory, members=dist_info) - return dist_info[0].split('/')[0] - - -def _find_already_built_wheel(metadata_directory): - """Check for a wheel already built during the get_wheel_metadata hook. - """ - if not metadata_directory: - return None - metadata_parent = os.path.dirname(metadata_directory) - if not os.path.isfile(pjoin(metadata_parent, WHEEL_BUILT_MARKER)): - return None - - whl_files = glob(os.path.join(metadata_parent, '*.whl')) - if not whl_files: - print('Found wheel built marker, but no .whl files') - return None - if len(whl_files) > 1: - print('Found multiple .whl files; unspecified behaviour. ' - 'Will call build_wheel.') - return None - - # Exactly one .whl file - return whl_files[0] - - -def build_wheel(wheel_directory, config_settings, metadata_directory=None): - """Invoke the mandatory build_wheel hook. - - If a wheel was already built in the - prepare_metadata_for_build_wheel fallback, this - will copy it rather than rebuilding the wheel. - """ - prebuilt_whl = _find_already_built_wheel(metadata_directory) - if prebuilt_whl: - shutil.copy2(prebuilt_whl, wheel_directory) - return os.path.basename(prebuilt_whl) - - return _build_backend().build_wheel(wheel_directory, config_settings, - metadata_directory) - - -def build_editable(wheel_directory, config_settings, metadata_directory=None): - """Invoke the optional build_editable hook. 
- - If a wheel was already built in the - prepare_metadata_for_build_editable fallback, this - will copy it rather than rebuilding the wheel. - """ - backend = _build_backend() - try: - hook = backend.build_editable - except AttributeError: - raise HookMissing() - else: - prebuilt_whl = _find_already_built_wheel(metadata_directory) - if prebuilt_whl: - shutil.copy2(prebuilt_whl, wheel_directory) - return os.path.basename(prebuilt_whl) - - return hook(wheel_directory, config_settings, metadata_directory) - - -def get_requires_for_build_sdist(config_settings): - """Invoke the optional get_requires_for_build_wheel hook - - Returns [] if the hook is not defined. - """ - backend = _build_backend() - try: - hook = backend.get_requires_for_build_sdist - except AttributeError: - return [] - else: - return hook(config_settings) - - -class _DummyException(Exception): - """Nothing should ever raise this exception""" - - -class GotUnsupportedOperation(Exception): - """For internal use when backend raises UnsupportedOperation""" - def __init__(self, traceback): - self.traceback = traceback - - -def build_sdist(sdist_directory, config_settings): - """Invoke the mandatory build_sdist hook.""" - backend = _build_backend() - try: - return backend.build_sdist(sdist_directory, config_settings) - except getattr(backend, 'UnsupportedOperation', _DummyException): - raise GotUnsupportedOperation(traceback.format_exc()) - - -HOOK_NAMES = { - 'get_requires_for_build_wheel', - 'prepare_metadata_for_build_wheel', - 'build_wheel', - 'get_requires_for_build_editable', - 'prepare_metadata_for_build_editable', - 'build_editable', - 'get_requires_for_build_sdist', - 'build_sdist', - '_supported_features', -} - - -def main(): - if len(sys.argv) < 3: - sys.exit("Needs args: hook_name, control_dir") - hook_name = sys.argv[1] - control_dir = sys.argv[2] - if hook_name not in HOOK_NAMES: - sys.exit("Unknown hook: %s" % hook_name) - hook = globals()[hook_name] - - hook_input = read_json(pjoin(control_dir, 'input.json')) - - json_out = {'unsupported': False, 'return_val': None} - try: - json_out['return_val'] = hook(**hook_input['kwargs']) - except BackendUnavailable as e: - json_out['no_backend'] = True - json_out['traceback'] = e.traceback - except BackendInvalid as e: - json_out['backend_invalid'] = True - json_out['backend_error'] = e.message - except GotUnsupportedOperation as e: - json_out['unsupported'] = True - json_out['traceback'] = e.traceback - except HookMissing as e: - json_out['hook_missing'] = True - json_out['missing_hook_name'] = e.hook_name or hook_name - - write_json(json_out, pjoin(control_dir, 'output.json'), indent=2) - - -if __name__ == '__main__': - main() diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/common.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/common.py deleted file mode 100644 index 1859fb79cc4e78850b69742fca56698041ce59f8..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/common.py +++ /dev/null @@ -1,424 +0,0 @@ -# common.py -from .core import * -from .helpers import delimited_list, any_open_tag, any_close_tag -from datetime import datetime - - -# some other useful expressions - using lower-case class name since we are really using this as a namespace -class pyparsing_common: - """Here are some common low-level expressions that may be useful in - jump-starting parser 
development: - - - numeric forms (:class:`integers`, :class:`reals`, - :class:`scientific notation`) - - common :class:`programming identifiers` - - network addresses (:class:`MAC`, - :class:`IPv4`, :class:`IPv6`) - - ISO8601 :class:`dates` and - :class:`datetime` - - :class:`UUID` - - :class:`comma-separated list` - - :class:`url` - - Parse actions: - - - :class:`convertToInteger` - - :class:`convertToFloat` - - :class:`convertToDate` - - :class:`convertToDatetime` - - :class:`stripHTMLTags` - - :class:`upcaseTokens` - - :class:`downcaseTokens` - - Example:: - - pyparsing_common.number.runTests(''' - # any int or real number, returned as the appropriate type - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - ''') - - pyparsing_common.fnumber.runTests(''' - # any int or real number, returned as float - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - ''') - - pyparsing_common.hex_integer.runTests(''' - # hex numbers - 100 - FF - ''') - - pyparsing_common.fraction.runTests(''' - # fractions - 1/2 - -3/4 - ''') - - pyparsing_common.mixed_integer.runTests(''' - # mixed fractions - 1 - 1/2 - -3/4 - 1-3/4 - ''') - - import uuid - pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) - pyparsing_common.uuid.runTests(''' - # uuid - 12345678-1234-5678-1234-567812345678 - ''') - - prints:: - - # any int or real number, returned as the appropriate type - 100 - [100] - - -100 - [-100] - - +100 - [100] - - 3.14159 - [3.14159] - - 6.02e23 - [6.02e+23] - - 1e-12 - [1e-12] - - # any int or real number, returned as float - 100 - [100.0] - - -100 - [-100.0] - - +100 - [100.0] - - 3.14159 - [3.14159] - - 6.02e23 - [6.02e+23] - - 1e-12 - [1e-12] - - # hex numbers - 100 - [256] - - FF - [255] - - # fractions - 1/2 - [0.5] - - -3/4 - [-0.75] - - # mixed fractions - 1 - [1] - - 1/2 - [0.5] - - -3/4 - [-0.75] - - 1-3/4 - [1.75] - - # uuid - 12345678-1234-5678-1234-567812345678 - [UUID('12345678-1234-5678-1234-567812345678')] - """ - - convert_to_integer = token_map(int) - """ - Parse action for converting parsed integers to Python int - """ - - convert_to_float = token_map(float) - """ - Parse action for converting parsed numbers to Python float - """ - - integer = Word(nums).set_name("integer").set_parse_action(convert_to_integer) - """expression that parses an unsigned integer, returns an int""" - - hex_integer = ( - Word(hexnums).set_name("hex integer").set_parse_action(token_map(int, 16)) - ) - """expression that parses a hexadecimal integer, returns an int""" - - signed_integer = ( - Regex(r"[+-]?\d+") - .set_name("signed integer") - .set_parse_action(convert_to_integer) - ) - """expression that parses an integer with optional leading sign, returns an int""" - - fraction = ( - signed_integer().set_parse_action(convert_to_float) - + "/" - + signed_integer().set_parse_action(convert_to_float) - ).set_name("fraction") - """fractional expression of an integer divided by an integer, returns a float""" - fraction.add_parse_action(lambda tt: tt[0] / tt[-1]) - - mixed_integer = ( - fraction | signed_integer + Opt(Opt("-").suppress() + fraction) - ).set_name("fraction or mixed integer-fraction") - """mixed integer of the form 'integer - fraction', with optional leading integer, returns float""" - mixed_integer.add_parse_action(sum) - - real = ( - Regex(r"[+-]?(?:\d+\.\d*|\.\d+)") - .set_name("real number") - .set_parse_action(convert_to_float) - ) - """expression that parses a floating point number and returns a float""" - - sci_real = ( - Regex(r"[+-]?(?:\d+(?:[eE][+-]?\d+)|(?:\d+\.\d*|\.\d+)(?:[eE][+-]?\d+)?)") 
- .set_name("real number with scientific notation") - .set_parse_action(convert_to_float) - ) - """expression that parses a floating point number with optional - scientific notation and returns a float""" - - # streamlining this expression makes the docs nicer-looking - number = (sci_real | real | signed_integer).setName("number").streamline() - """any numeric expression, returns the corresponding Python type""" - - fnumber = ( - Regex(r"[+-]?\d+\.?\d*([eE][+-]?\d+)?") - .set_name("fnumber") - .set_parse_action(convert_to_float) - ) - """any int or real number, returned as float""" - - identifier = Word(identchars, identbodychars).set_name("identifier") - """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')""" - - ipv4_address = Regex( - r"(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}" - ).set_name("IPv4 address") - "IPv4 address (``0.0.0.0 - 255.255.255.255``)" - - _ipv6_part = Regex(r"[0-9a-fA-F]{1,4}").set_name("hex_integer") - _full_ipv6_address = (_ipv6_part + (":" + _ipv6_part) * 7).set_name( - "full IPv6 address" - ) - _short_ipv6_address = ( - Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6)) - + "::" - + Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6)) - ).set_name("short IPv6 address") - _short_ipv6_address.add_condition( - lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8 - ) - _mixed_ipv6_address = ("::ffff:" + ipv4_address).set_name("mixed IPv6 address") - ipv6_address = Combine( - (_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).set_name( - "IPv6 address" - ) - ).set_name("IPv6 address") - "IPv6 address (long, short, or mixed form)" - - mac_address = Regex( - r"[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}" - ).set_name("MAC address") - "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)" - - @staticmethod - def convert_to_date(fmt: str = "%Y-%m-%d"): - """ - Helper to create a parse action for converting parsed date string to Python datetime.date - - Params - - - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``) - - Example:: - - date_expr = pyparsing_common.iso8601_date.copy() - date_expr.setParseAction(pyparsing_common.convertToDate()) - print(date_expr.parseString("1999-12-31")) - - prints:: - - [datetime.date(1999, 12, 31)] - """ - - def cvt_fn(ss, ll, tt): - try: - return datetime.strptime(tt[0], fmt).date() - except ValueError as ve: - raise ParseException(ss, ll, str(ve)) - - return cvt_fn - - @staticmethod - def convert_to_datetime(fmt: str = "%Y-%m-%dT%H:%M:%S.%f"): - """Helper to create a parse action for converting parsed - datetime string to Python datetime.datetime - - Params - - - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``) - - Example:: - - dt_expr = pyparsing_common.iso8601_datetime.copy() - dt_expr.setParseAction(pyparsing_common.convertToDatetime()) - print(dt_expr.parseString("1999-12-31T23:59:59.999")) - - prints:: - - [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] - """ - - def cvt_fn(s, l, t): - try: - return datetime.strptime(t[0], fmt) - except ValueError as ve: - raise ParseException(s, l, str(ve)) - - return cvt_fn - - iso8601_date = Regex( - r"(?P\d{4})(?:-(?P\d\d)(?:-(?P\d\d))?)?" - ).set_name("ISO8601 date") - "ISO8601 date (``yyyy-mm-dd``)" - - iso8601_datetime = Regex( - r"(?P\d{4})-(?P\d\d)-(?P\d\d)[T ](?P\d\d):(?P\d\d)(:(?P\d\d(\.\d*)?)?)?(?PZ|[+-]\d\d:?\d\d)?" 
- ).set_name("ISO8601 datetime") - "ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``" - - uuid = Regex(r"[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}").set_name("UUID") - "UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)" - - _html_stripper = any_open_tag.suppress() | any_close_tag.suppress() - - @staticmethod - def strip_html_tags(s: str, l: int, tokens: ParseResults): - """Parse action to remove HTML tags from web page HTML source - - Example:: - - # strip HTML links from normal text - text = 'More info at the pyparsing wiki page' - td, td_end = makeHTMLTags("TD") - table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end - print(table_text.parseString(text).body) - - Prints:: - - More info at the pyparsing wiki page - """ - return pyparsing_common._html_stripper.transform_string(tokens[0]) - - _commasepitem = ( - Combine( - OneOrMore( - ~Literal(",") - + ~LineEnd() - + Word(printables, exclude_chars=",") - + Opt(White(" \t") + ~FollowedBy(LineEnd() | ",")) - ) - ) - .streamline() - .set_name("commaItem") - ) - comma_separated_list = delimited_list( - Opt(quoted_string.copy() | _commasepitem, default="") - ).set_name("comma separated list") - """Predefined expression of 1 or more printable words or quoted strings, separated by commas.""" - - upcase_tokens = staticmethod(token_map(lambda t: t.upper())) - """Parse action to convert tokens to upper case.""" - - downcase_tokens = staticmethod(token_map(lambda t: t.lower())) - """Parse action to convert tokens to lower case.""" - - # fmt: off - url = Regex( - # https://mathiasbynens.be/demo/url-regex - # https://gist.github.com/dperini/729294 - r"^" + - # protocol identifier (optional) - # short syntax // still required - r"(?:(?:(?Phttps?|ftp):)?\/\/)" + - # user:pass BasicAuth (optional) - r"(?:(?P\S+(?::\S*)?)@)?" + - r"(?P" + - # IP address exclusion - # private & local networks - r"(?!(?:10|127)(?:\.\d{1,3}){3})" + - r"(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})" + - r"(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})" + - # IP address dotted notation octets - # excludes loopback network 0.0.0.0 - # excludes reserved space >= 224.0.0.0 - # excludes network & broadcast addresses - # (first & last IP address of each class) - r"(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])" + - r"(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}" + - r"(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))" + - r"|" + - # host & domain names, may end with dot - # can be replaced by a shortest alternative - # (?![-_])(?:[-\w\u00a1-\uffff]{0,63}[^-_]\.)+ - r"(?:" + - r"(?:" + - r"[a-z0-9\u00a1-\uffff]" + - r"[a-z0-9\u00a1-\uffff_-]{0,62}" + - r")?" + - r"[a-z0-9\u00a1-\uffff]\." + - r")+" + - # TLD identifier name, may end with dot - r"(?:[a-z\u00a1-\uffff]{2,}\.?)" + - r")" + - # port number (optional) - r"(:(?P\d{2,5}))?" + - # resource path (optional) - r"(?P\/[^?# ]*)?" + - # query string (optional) - r"(\?(?P[^#]*))?" + - # fragment (optional) - r"(#(?P\S*))?" 
+ - r"$" - ).set_name("url") - # fmt: on - - # pre-PEP8 compatibility names - convertToInteger = convert_to_integer - convertToFloat = convert_to_float - convertToDate = convert_to_date - convertToDatetime = convert_to_datetime - stripHTMLTags = strip_html_tags - upcaseTokens = upcase_tokens - downcaseTokens = downcase_tokens - - -_builtin_exprs = [ - v for v in vars(pyparsing_common).values() if isinstance(v, ParserElement) -] diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/command/py36compat.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/command/py36compat.py deleted file mode 100644 index 343547a4d316e48144ba6bdf342dcc24cd6cb6cd..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/command/py36compat.py +++ /dev/null @@ -1,134 +0,0 @@ -import os -from glob import glob -from distutils.util import convert_path -from distutils.command import sdist - - -class sdist_add_defaults: - """ - Mix-in providing forward-compatibility for functionality as found in - distutils on Python 3.7. - - Do not edit the code in this class except to update functionality - as implemented in distutils. Instead, override in the subclass. - """ - - def add_defaults(self): - """Add all the default files to self.filelist: - - README or README.txt - - setup.py - - test/test*.py - - all pure Python modules mentioned in setup script - - all files pointed by package_data (build_py) - - all files defined in data_files. - - all files defined as scripts. - - all C sources listed as part of extensions or C libraries - in the setup script (doesn't catch C headers!) - Warns if (README or README.txt) or setup.py are missing; everything - else is optional. 
- """ - self._add_defaults_standards() - self._add_defaults_optional() - self._add_defaults_python() - self._add_defaults_data_files() - self._add_defaults_ext() - self._add_defaults_c_libs() - self._add_defaults_scripts() - - @staticmethod - def _cs_path_exists(fspath): - """ - Case-sensitive path existence check - - >>> sdist_add_defaults._cs_path_exists(__file__) - True - >>> sdist_add_defaults._cs_path_exists(__file__.upper()) - False - """ - if not os.path.exists(fspath): - return False - # make absolute so we always have a directory - abspath = os.path.abspath(fspath) - directory, filename = os.path.split(abspath) - return filename in os.listdir(directory) - - def _add_defaults_standards(self): - standards = [self.READMES, self.distribution.script_name] - for fn in standards: - if isinstance(fn, tuple): - alts = fn - got_it = False - for fn in alts: - if self._cs_path_exists(fn): - got_it = True - self.filelist.append(fn) - break - - if not got_it: - self.warn("standard file not found: should have one of " + - ', '.join(alts)) - else: - if self._cs_path_exists(fn): - self.filelist.append(fn) - else: - self.warn("standard file '%s' not found" % fn) - - def _add_defaults_optional(self): - optional = ['test/test*.py', 'setup.cfg'] - for pattern in optional: - files = filter(os.path.isfile, glob(pattern)) - self.filelist.extend(files) - - def _add_defaults_python(self): - # build_py is used to get: - # - python modules - # - files defined in package_data - build_py = self.get_finalized_command('build_py') - - # getting python files - if self.distribution.has_pure_modules(): - self.filelist.extend(build_py.get_source_files()) - - # getting package_data files - # (computed in build_py.data_files by build_py.finalize_options) - for pkg, src_dir, build_dir, filenames in build_py.data_files: - for filename in filenames: - self.filelist.append(os.path.join(src_dir, filename)) - - def _add_defaults_data_files(self): - # getting distribution.data_files - if self.distribution.has_data_files(): - for item in self.distribution.data_files: - if isinstance(item, str): - # plain file - item = convert_path(item) - if os.path.isfile(item): - self.filelist.append(item) - else: - # a (dirname, filenames) tuple - dirname, filenames = item - for f in filenames: - f = convert_path(f) - if os.path.isfile(f): - self.filelist.append(f) - - def _add_defaults_ext(self): - if self.distribution.has_ext_modules(): - build_ext = self.get_finalized_command('build_ext') - self.filelist.extend(build_ext.get_source_files()) - - def _add_defaults_c_libs(self): - if self.distribution.has_c_libraries(): - build_clib = self.get_finalized_command('build_clib') - self.filelist.extend(build_clib.get_source_files()) - - def _add_defaults_scripts(self): - if self.distribution.has_scripts(): - build_scripts = self.get_finalized_command('build_scripts') - self.filelist.extend(build_scripts.get_source_files()) - - -if hasattr(sdist.sdist, '_add_defaults_standards'): - # disable the functionality already available upstream - class sdist_add_defaults: # noqa - pass diff --git a/spaces/tmaham/DS-Fusion-Express/ldm/modules/distributions/__init__.py b/spaces/tmaham/DS-Fusion-Express/ldm/modules/distributions/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/tmnam20/code-summarization/README.md b/spaces/tmnam20/code-summarization/README.md deleted file mode 100644 index 
7128e4bb4bb4609896059fa57d7c4a317cf38306..0000000000000000000000000000000000000000 --- a/spaces/tmnam20/code-summarization/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Codebert Code Summarization -emoji: 🏃 -colorFrom: yellow -colorTo: gray -sdk: streamlit -sdk_version: 1.19.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/tomofi/MMOCR/tests/test_models/test_textdet_neck.py b/spaces/tomofi/MMOCR/tests/test_models/test_textdet_neck.py deleted file mode 100644 index 7bee9d7e932e77762769497030c565ca8d59e515..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/tests/test_models/test_textdet_neck.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import pytest -import torch - -from mmocr.models.textdet.necks import FPNC, FPN_UNet - - -def test_fpnc(): - - in_channels = [64, 128, 256, 512] - size = [112, 56, 28, 14] - for flag in [False, True]: - fpnc = FPNC( - in_channels=in_channels, - bias_on_lateral=flag, - bn_re_on_lateral=flag, - bias_on_smooth=flag, - bn_re_on_smooth=flag, - conv_after_concat=flag) - fpnc.init_weights() - inputs = [] - for i in range(4): - inputs.append(torch.rand(1, in_channels[i], size[i], size[i])) - outputs = fpnc.forward(inputs) - assert list(outputs.size()) == [1, 256, 112, 112] - - -def test_fpn_unet_neck(): - s = 64 - feat_sizes = [s // 2**i for i in range(4)] # [64, 32, 16, 8] - in_channels = [8, 16, 32, 64] - out_channels = 4 - - # len(in_channcels) is not equal to 4 - with pytest.raises(AssertionError): - FPN_UNet(in_channels + [128], out_channels) - - # `out_channels` is not int type - with pytest.raises(AssertionError): - FPN_UNet(in_channels, [2, 4]) - - feats = [ - torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i]) - for i in range(len(in_channels)) - ] - - fpn_unet_neck = FPN_UNet(in_channels, out_channels) - fpn_unet_neck.init_weights() - - out_neck = fpn_unet_neck(feats) - assert out_neck.shape == torch.Size([1, out_channels, s * 4, s * 4]) diff --git a/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/modeling/backbone/fpn.py b/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/modeling/backbone/fpn.py deleted file mode 100644 index b1190389164fa5057dc4cd17e959969c588ad1ee..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/modeling/backbone/fpn.py +++ /dev/null @@ -1,175 +0,0 @@ -# #!/usr/bin/env python3 -# # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -# import torch -# import torch.nn.functional as F -# from torch import nn - - -# class FPN(nn.Module): -# """ -# Module that adds FPN on top of a list of feature maps. 
-# The feature maps are currently supposed to be in increasing depth -# order, and must be consecutive -# """ - -# def __init__(self, in_channels_list, out_channels, top_blocks=None): -# """ -# Arguments: -# in_channels_list (list[int]): number of channels for each feature map that -# will be fed -# out_channels (int): number of channels of the FPN representation -# top_blocks (nn.Module or None): if provided, an extra operation will -# be performed on the output of the last (smallest resolution) -# FPN output, and the result will extend the result list -# """ -# super(FPN, self).__init__() -# self.inner_blocks = [] -# self.layer_blocks = [] -# for idx, in_channels in enumerate(in_channels_list, 1): -# inner_block = "fpn_inner{}".format(idx) -# layer_block = "fpn_layer{}".format(idx) -# inner_block_module = nn.Conv2d(in_channels, out_channels, 1) -# layer_block_module = nn.Conv2d(out_channels, out_channels, 3, 1, 1) -# for module in [inner_block_module, layer_block_module]: -# # Caffe2 implementation uses XavierFill, which in fact -# # corresponds to kaiming_uniform_ in PyTorch -# nn.init.kaiming_uniform_(module.weight, a=1) -# nn.init.constant_(module.bias, 0) -# self.add_module(inner_block, inner_block_module) -# self.add_module(layer_block, layer_block_module) -# self.inner_blocks.append(inner_block) -# self.layer_blocks.append(layer_block) -# self.top_blocks = top_blocks - -# def forward(self, x): -# """ -# Arguments: -# x (list[Tensor]): feature maps for each feature level. -# Returns: -# results (tuple[Tensor]): feature maps after FPN layers. -# They are ordered from highest resolution first. -# """ -# last_inner = getattr(self, self.inner_blocks[-1])(x[-1]) -# results = [] -# results.append(getattr(self, self.layer_blocks[-1])(last_inner)) -# for feature, inner_block, layer_block in zip( -# x[:-1][::-1], self.inner_blocks[:-1][::-1], self.layer_blocks[:-1][::-1] -# ): -# inner_top_down = F.interpolate(last_inner, scale_factor=2, mode="nearest") -# inner_lateral = getattr(self, inner_block)(feature) -# # TODO use size instead of scale to make it robust to different sizes -# # inner_top_down = F.upsample(last_inner, size=inner_lateral.shape[-2:], -# # mode='bilinear', align_corners=False) -# last_inner = inner_lateral + inner_top_down -# results.insert(0, getattr(self, layer_block)(last_inner)) - -# if self.top_blocks is not None: -# last_results = self.top_blocks(results[-1]) -# results.extend(last_results) - -# return tuple(results) - - -# class LastLevelMaxPool(nn.Module): -# def forward(self, x): -# return [F.max_pool2d(x, 1, 2, 0)] - - -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -import torch -import torch.nn.functional as F -from torch import nn - -class FPN(nn.Module): - """ - Module that adds FPN on top of a list of feature maps. 
- The feature maps are currently supposed to be in increasing depth - order, and must be consecutive - """ - - def __init__( - self, in_channels_list, out_channels, conv_block, top_blocks=None - ): - """ - Arguments: - in_channels_list (list[int]): number of channels for each feature map that - will be fed - out_channels (int): number of channels of the FPN representation - top_blocks (nn.Module or None): if provided, an extra operation will - be performed on the output of the last (smallest resolution) - FPN output, and the result will extend the result list - """ - super(FPN, self).__init__() - self.inner_blocks = [] - self.layer_blocks = [] - for idx, in_channels in enumerate(in_channels_list, 1): - inner_block = "fpn_inner{}".format(idx) - layer_block = "fpn_layer{}".format(idx) - - if in_channels == 0: - continue - inner_block_module = conv_block(in_channels, out_channels, 1) - layer_block_module = conv_block(out_channels, out_channels, 3, 1) - self.add_module(inner_block, inner_block_module) - self.add_module(layer_block, layer_block_module) - self.inner_blocks.append(inner_block) - self.layer_blocks.append(layer_block) - self.top_blocks = top_blocks - - def forward(self, x): - """ - Arguments: - x (list[Tensor]): feature maps for each feature level. - Returns: - results (tuple[Tensor]): feature maps after FPN layers. - They are ordered from highest resolution first. - """ - last_inner = getattr(self, self.inner_blocks[-1])(x[-1]) - results = [] - results.append(getattr(self, self.layer_blocks[-1])(last_inner)) - for feature, inner_block, layer_block in zip( - x[:-1][::-1], self.inner_blocks[:-1][::-1], self.layer_blocks[:-1][::-1] - ): - if not inner_block: - continue - inner_top_down = F.interpolate(last_inner, scale_factor=2, mode="nearest") - inner_lateral = getattr(self, inner_block)(feature) - # TODO use size instead of scale to make it robust to different sizes - # inner_top_down = F.upsample(last_inner, size=inner_lateral.shape[-2:], - # mode='bilinear', align_corners=False) - last_inner = inner_lateral + inner_top_down - results.insert(0, getattr(self, layer_block)(last_inner)) - - if isinstance(self.top_blocks, LastLevelP6P7): - last_results = self.top_blocks(x[-1], results[-1]) - results.extend(last_results) - elif isinstance(self.top_blocks, LastLevelMaxPool): - last_results = self.top_blocks(results[-1]) - results.extend(last_results) - - return tuple(results) - - -class LastLevelMaxPool(nn.Module): - def forward(self, x): - return [F.max_pool2d(x, 1, 2, 0)] - - -class LastLevelP6P7(nn.Module): - """ - This module is used in RetinaNet to generate extra layers, P6 and P7. 
- """ - def __init__(self, in_channels, out_channels): - super(LastLevelP6P7, self).__init__() - self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1) - self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1) - for module in [self.p6, self.p7]: - nn.init.kaiming_uniform_(module.weight, a=1) - nn.init.constant_(module.bias, 0) - self.use_P5 = in_channels == out_channels - - def forward(self, c5, p5): - x = p5 if self.use_P5 else c5 - p6 = self.p6(x) - p7 = self.p7(F.relu(p6)) - return [p6, p7] diff --git a/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/modeling/roi_heads/box_head/__init__.py b/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/modeling/roi_heads/box_head/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco.py deleted file mode 100644 index 6e124116bcfa9358613507f74ebadb162d8c86a9..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco.py +++ /dev/null @@ -1,105 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -# model settings -model = dict( - type='FCOS', - pretrained='open-mmlab://detectron/resnet50_caffe', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False), - norm_eval=True, - style='caffe'), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=1, - add_extra_convs=True, - extra_convs_on_inputs=False, # use P5 - num_outs=5, - relu_before_extra_convs=True), - bbox_head=dict( - type='FCOSHead', - num_classes=80, - in_channels=256, - stacked_convs=4, - feat_channels=256, - strides=[8, 16, 32, 64, 128], - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox=dict(type='IoULoss', loss_weight=1.0), - loss_centerness=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), - # training and testing settings - train_cfg=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.4, - min_pos_iou=0, - ignore_iof_thr=-1), - allowed_border=-1, - pos_weight=-1, - debug=False), - test_cfg=dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100)) -img_norm_cfg = dict( - mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - 
samples_per_gpu=2, - workers_per_gpu=2, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# optimizer -optimizer = dict( - lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.)) -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) -# learning policy -lr_config = dict( - policy='step', - warmup='constant', - warmup_iters=500, - warmup_ratio=1.0 / 3, - step=[8, 11]) -runner = dict(type='EpochBasedRunner', max_epochs=12) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/fpg/retinanet_r50_fpg_crop640_50e_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/fpg/retinanet_r50_fpg_crop640_50e_coco.py deleted file mode 100644 index 504ed5ec5040559b3d10f7caf8a970005a1a92d7..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/fpg/retinanet_r50_fpg_crop640_50e_coco.py +++ /dev/null @@ -1,53 +0,0 @@ -_base_ = '../nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py' - -norm_cfg = dict(type='BN', requires_grad=True) -model = dict( - neck=dict( - _delete_=True, - type='FPG', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - inter_channels=256, - num_outs=5, - add_extra_convs=True, - start_level=1, - stack_times=9, - paths=['bu'] * 9, - same_down_trans=None, - same_up_trans=dict( - type='conv', - kernel_size=3, - stride=2, - padding=1, - norm_cfg=norm_cfg, - inplace=False, - order=('act', 'conv', 'norm')), - across_lateral_trans=dict( - type='conv', - kernel_size=1, - norm_cfg=norm_cfg, - inplace=False, - order=('act', 'conv', 'norm')), - across_down_trans=dict( - type='interpolation_conv', - mode='nearest', - kernel_size=3, - norm_cfg=norm_cfg, - order=('act', 'conv', 'norm'), - inplace=False), - across_up_trans=None, - across_skip_trans=dict( - type='conv', - kernel_size=1, - norm_cfg=norm_cfg, - inplace=False, - order=('act', 'conv', 'norm')), - output_trans=dict( - type='last_conv', - kernel_size=3, - order=('act', 'conv', 'norm'), - inplace=False), - norm_cfg=norm_cfg, - skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()])) - -evaluation = dict(interval=2) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/core/bbox/transforms.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/core/bbox/transforms.py deleted file mode 100644 index fb141f4735e6c18925d72691597e6ccc2ba45096..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/core/bbox/transforms.py +++ /dev/null @@ -1,246 +0,0 @@ -import numpy as np -import torch - - -def bbox_flip(bboxes, img_shape, direction='horizontal'): - """Flip bboxes horizontally or vertically. - - Args: - bboxes (Tensor): Shape (..., 4*k) - img_shape (tuple): Image shape. - direction (str): Flip direction, options are "horizontal", "vertical", - "diagonal". Default: "horizontal" - - Returns: - Tensor: Flipped bboxes. 
- """ - assert bboxes.shape[-1] % 4 == 0 - assert direction in ['horizontal', 'vertical', 'diagonal'] - flipped = bboxes.clone() - if direction == 'horizontal': - flipped[..., 0::4] = img_shape[1] - bboxes[..., 2::4] - flipped[..., 2::4] = img_shape[1] - bboxes[..., 0::4] - elif direction == 'vertical': - flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4] - flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4] - else: - flipped[..., 0::4] = img_shape[1] - bboxes[..., 2::4] - flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4] - flipped[..., 2::4] = img_shape[1] - bboxes[..., 0::4] - flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4] - return flipped - - -def bbox_mapping(bboxes, - img_shape, - scale_factor, - flip, - flip_direction='horizontal'): - """Map bboxes from the original image scale to testing scale.""" - new_bboxes = bboxes * bboxes.new_tensor(scale_factor) - if flip: - new_bboxes = bbox_flip(new_bboxes, img_shape, flip_direction) - return new_bboxes - - -def bbox_mapping_back(bboxes, - img_shape, - scale_factor, - flip, - flip_direction='horizontal'): - """Map bboxes from testing scale to original image scale.""" - new_bboxes = bbox_flip(bboxes, img_shape, - flip_direction) if flip else bboxes - new_bboxes = new_bboxes.view(-1, 4) / new_bboxes.new_tensor(scale_factor) - return new_bboxes.view(bboxes.shape) - - -def bbox2roi(bbox_list): - """Convert a list of bboxes to roi format. - - Args: - bbox_list (list[Tensor]): a list of bboxes corresponding to a batch - of images. - - Returns: - Tensor: shape (n, 5), [batch_ind, x1, y1, x2, y2] - """ - rois_list = [] - for img_id, bboxes in enumerate(bbox_list): - if bboxes.size(0) > 0: - img_inds = bboxes.new_full((bboxes.size(0), 1), img_id) - rois = torch.cat([img_inds, bboxes[:, :4]], dim=-1) - else: - rois = bboxes.new_zeros((0, 5)) - rois_list.append(rois) - rois = torch.cat(rois_list, 0) - return rois - - -def roi2bbox(rois): - """Convert rois to bounding box format. - - Args: - rois (torch.Tensor): RoIs with the shape (n, 5) where the first - column indicates batch id of each RoI. - - Returns: - list[torch.Tensor]: Converted boxes of corresponding rois. - """ - bbox_list = [] - img_ids = torch.unique(rois[:, 0].cpu(), sorted=True) - for img_id in img_ids: - inds = (rois[:, 0] == img_id.item()) - bbox = rois[inds, 1:] - bbox_list.append(bbox) - return bbox_list - - -def bbox2result(bboxes, labels, num_classes): - """Convert detection results to a list of numpy arrays. - - Args: - bboxes (torch.Tensor | np.ndarray): shape (n, 5) - labels (torch.Tensor | np.ndarray): shape (n, ) - num_classes (int): class number, including background class - - Returns: - list(ndarray): bbox results of each class - """ - if bboxes.shape[0] == 0: - return [np.zeros((0, 5), dtype=np.float32) for i in range(num_classes)] - else: - if isinstance(bboxes, torch.Tensor): - bboxes = bboxes.detach().cpu().numpy() - labels = labels.detach().cpu().numpy() - return [bboxes[labels == i, :] for i in range(num_classes)] - - -def distance2bbox(points, distance, max_shape=None): - """Decode distance prediction to bounding box. - - Args: - points (Tensor): Shape (B, N, 2) or (N, 2). - distance (Tensor): Distance from the given point to 4 - boundaries (left, top, right, bottom). Shape (B, N, 4) or (N, 4) - max_shape (Sequence[int] or torch.Tensor or Sequence[ - Sequence[int]],optional): Maximum bounds for boxes, specifies - (H, W, C) or (H, W). 
If priors shape is (B, N, 4), then - the max_shape should be a Sequence[Sequence[int]] - and the length of max_shape should also be B. - - Returns: - Tensor: Boxes with shape (N, 4) or (B, N, 4) - """ - x1 = points[..., 0] - distance[..., 0] - y1 = points[..., 1] - distance[..., 1] - x2 = points[..., 0] + distance[..., 2] - y2 = points[..., 1] + distance[..., 3] - - bboxes = torch.stack([x1, y1, x2, y2], -1) - - if max_shape is not None: - # clip bboxes with dynamic `min` and `max` for onnx - if torch.onnx.is_in_onnx_export(): - from mmdet.core.export import dynamic_clip_for_onnx - x1, y1, x2, y2 = dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape) - bboxes = torch.stack([x1, y1, x2, y2], dim=-1) - return bboxes - if not isinstance(max_shape, torch.Tensor): - max_shape = x1.new_tensor(max_shape) - max_shape = max_shape[..., :2].type_as(x1) - if max_shape.ndim == 2: - assert bboxes.ndim == 3 - assert max_shape.size(0) == bboxes.size(0) - - min_xy = x1.new_tensor(0) - max_xy = torch.cat([max_shape, max_shape], - dim=-1).flip(-1).unsqueeze(-2) - bboxes = torch.where(bboxes < min_xy, min_xy, bboxes) - bboxes = torch.where(bboxes > max_xy, max_xy, bboxes) - - return bboxes - - -def bbox2distance(points, bbox, max_dis=None, eps=0.1): - """Decode bounding box based on distances. - - Args: - points (Tensor): Shape (n, 2), [x, y]. - bbox (Tensor): Shape (n, 4), "xyxy" format - max_dis (float): Upper bound of the distance. - eps (float): a small value to ensure target < max_dis, instead <= - - Returns: - Tensor: Decoded distances. - """ - left = points[:, 0] - bbox[:, 0] - top = points[:, 1] - bbox[:, 1] - right = bbox[:, 2] - points[:, 0] - bottom = bbox[:, 3] - points[:, 1] - if max_dis is not None: - left = left.clamp(min=0, max=max_dis - eps) - top = top.clamp(min=0, max=max_dis - eps) - right = right.clamp(min=0, max=max_dis - eps) - bottom = bottom.clamp(min=0, max=max_dis - eps) - return torch.stack([left, top, right, bottom], -1) - - -def bbox_rescale(bboxes, scale_factor=1.0): - """Rescale bounding box w.r.t. scale_factor. - - Args: - bboxes (Tensor): Shape (n, 4) for bboxes or (n, 5) for rois - scale_factor (float): rescale factor - - Returns: - Tensor: Rescaled bboxes. - """ - if bboxes.size(1) == 5: - bboxes_ = bboxes[:, 1:] - inds_ = bboxes[:, 0] - else: - bboxes_ = bboxes - cx = (bboxes_[:, 0] + bboxes_[:, 2]) * 0.5 - cy = (bboxes_[:, 1] + bboxes_[:, 3]) * 0.5 - w = bboxes_[:, 2] - bboxes_[:, 0] - h = bboxes_[:, 3] - bboxes_[:, 1] - w = w * scale_factor - h = h * scale_factor - x1 = cx - 0.5 * w - x2 = cx + 0.5 * w - y1 = cy - 0.5 * h - y2 = cy + 0.5 * h - if bboxes.size(1) == 5: - rescaled_bboxes = torch.stack([inds_, x1, y1, x2, y2], dim=-1) - else: - rescaled_bboxes = torch.stack([x1, y1, x2, y2], dim=-1) - return rescaled_bboxes - - -def bbox_cxcywh_to_xyxy(bbox): - """Convert bbox coordinates from (cx, cy, w, h) to (x1, y1, x2, y2). - - Args: - bbox (Tensor): Shape (n, 4) for bboxes. - - Returns: - Tensor: Converted bboxes. - """ - cx, cy, w, h = bbox.split((1, 1, 1, 1), dim=-1) - bbox_new = [(cx - 0.5 * w), (cy - 0.5 * h), (cx + 0.5 * w), (cy + 0.5 * h)] - return torch.cat(bbox_new, dim=-1) - - -def bbox_xyxy_to_cxcywh(bbox): - """Convert bbox coordinates from (x1, y1, x2, y2) to (cx, cy, w, h). - - Args: - bbox (Tensor): Shape (n, 4) for bboxes. - - Returns: - Tensor: Converted bboxes. 
- """ - x1, y1, x2, y2 = bbox.split((1, 1, 1, 1), dim=-1) - bbox_new = [(x1 + x2) / 2, (y1 + y2) / 2, (x2 - x1), (y2 - y1)] - return torch.cat(bbox_new, dim=-1) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/datasets/wider_face.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/datasets/wider_face.py deleted file mode 100644 index 3a13907db87a9986a7d701837259a0b712fc9dca..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/datasets/wider_face.py +++ /dev/null @@ -1,51 +0,0 @@ -import os.path as osp -import xml.etree.ElementTree as ET - -import mmcv - -from .builder import DATASETS -from .xml_style import XMLDataset - - -@DATASETS.register_module() -class WIDERFaceDataset(XMLDataset): - """Reader for the WIDER Face dataset in PASCAL VOC format. - - Conversion scripts can be found in - https://github.com/sovrasov/wider-face-pascal-voc-annotations - """ - CLASSES = ('face', ) - - def __init__(self, **kwargs): - super(WIDERFaceDataset, self).__init__(**kwargs) - - def load_annotations(self, ann_file): - """Load annotation from WIDERFace XML style annotation file. - - Args: - ann_file (str): Path of XML file. - - Returns: - list[dict]: Annotation info from XML file. - """ - - data_infos = [] - img_ids = mmcv.list_from_file(ann_file) - for img_id in img_ids: - filename = f'{img_id}.jpg' - xml_path = osp.join(self.img_prefix, 'Annotations', - f'{img_id}.xml') - tree = ET.parse(xml_path) - root = tree.getroot() - size = root.find('size') - width = int(size.find('width').text) - height = int(size.find('height').text) - folder = root.find('folder').text - data_infos.append( - dict( - id=img_id, - filename=osp.join(folder, filename), - width=width, - height=height)) - - return data_infos diff --git a/spaces/trttung1610/musicgen/tests/data/__init__.py b/spaces/trttung1610/musicgen/tests/data/__init__.py deleted file mode 100644 index 0952fcc3f57e34b3747962e9ebd6fc57aeea63fa..0000000000000000000000000000000000000000 --- a/spaces/trttung1610/musicgen/tests/data/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
diff --git a/spaces/trysem/confusion/README.md b/spaces/trysem/confusion/README.md deleted file mode 100644 index a946e76a8713e5341a7b4477fe406e1552ff6295..0000000000000000000000000000000000000000 --- a/spaces/trysem/confusion/README.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: DreamlikeArt-PhotoReal 2.0 -emoji: 📈 -colorFrom: blue -colorTo: yellow -sdk: gradio -sdk_version: 3.16.1 -app_file: app.py -pinned: false -duplicated_from: phenomenon1981/DreamlikeArt-PhotoReal-2.0 ---- ---- -title: DreamlikeArt-PhotoReal 2.0 -emoji: 📈 -colorFrom: blue -colorTo: yellow -sdk: gradio -sdk_version: 3.16.1 -app_file: app.py \ No newline at end of file diff --git a/spaces/umoubuton/atri-bert-vits2/monotonic_align/core.py b/spaces/umoubuton/atri-bert-vits2/monotonic_align/core.py deleted file mode 100644 index 7c962adea65543ef426034c4d53c4f0e615e8181..0000000000000000000000000000000000000000 --- a/spaces/umoubuton/atri-bert-vits2/monotonic_align/core.py +++ /dev/null @@ -1,46 +0,0 @@ -import numba - - -@numba.jit( - numba.void( - numba.int32[:, :, ::1], - numba.float32[:, :, ::1], - numba.int32[::1], - numba.int32[::1], - ), - nopython=True, - nogil=True, -) -def maximum_path_jit(paths, values, t_ys, t_xs): - b = paths.shape[0] - max_neg_val = -1e9 - for i in range(int(b)): - path = paths[i] - value = values[i] - t_y = t_ys[i] - t_x = t_xs[i] - - v_prev = v_cur = 0.0 - index = t_x - 1 - - for y in range(t_y): - for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): - if x == y: - v_cur = max_neg_val - else: - v_cur = value[y - 1, x] - if x == 0: - if y == 0: - v_prev = 0.0 - else: - v_prev = max_neg_val - else: - v_prev = value[y - 1, x - 1] - value[y, x] += max(v_prev, v_cur) - - for y in range(t_y - 1, -1, -1): - path[y, index] = 1 - if index != 0 and ( - index == y or value[y - 1, index] < value[y - 1, index - 1] - ): - index = index - 1 diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/0a12 0001 Driver __TOP__.md b/spaces/usbethFlerru/sovits-modelsV2/example/0a12 0001 Driver __TOP__.md deleted file mode 100644 index a7ce3ffa56930d863228a80ef781b817011f1cfe..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/0a12 0001 Driver __TOP__.md +++ /dev/null @@ -1,20 +0,0 @@ -

                      0a12 0001 driver


                      Download ———>>> https://urlcod.com/2uyX9n



                      - -Kernel module: btusb - -Driver: btusb - -Kernel 5.8.14 - -So what can I do? - -A: - -I have found a solution for that. I used the version of Ubuntu 20.04 LTS with kernel 5.8.15. - -It was built with the updated Bluetooth stack. - -apt install bcmwl-kernel-source 4fefd39f24
                      -
                      -
                      -

                      diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Consumed Sub Download LINK.md b/spaces/usbethFlerru/sovits-modelsV2/example/Consumed Sub Download LINK.md deleted file mode 100644 index 7f27ea3d7a739350301206c1a6033cc1833f049f..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Consumed Sub Download LINK.md +++ /dev/null @@ -1,24 +0,0 @@ -
                      -

                      Azure Artifacts enables teams to use feeds and upstream sources to manage their dependencies. You can use Azure Pipelines to publish and download different types of artifacts as part of your CI/CD workflow.

                      -

                      Consumed Sub Download


                      Download ✯✯✯ https://urlcod.com/2uyXOs



                      -

                      Build artifacts are stored on a Windows filesystem, which causes all UNIX permissions to be lost, including the execution bit. You might need to restore the correct UNIX permissions after downloading your artifacts from Azure Pipelines or TFS.

                      -

                      The 24-hour Dietary Recall (24HR) method provides comprehensive, quantitative information on individual diets by querying respondents about the type and quantity of all food and beverages consumed during the previous 24-hour period (Gibson & Ferguson, 2008). A standard multiple pass 24HR includes having the respondent iteratively provide increasingly granular data about each food or drink and its preparation method and other attributes, as well as an estimation of the portion size consumed. The multiple pass approach has been validated in many low- and middle-income countries (Gibson et al., 2017).

                      -

                      Individual-level quantitative dietary data can also be used to develop a better understanding of typical household food preparation, cooking methods, and brand names of foods consumed within the household. Furthermore, if individual-level dietary data are collected in conjunction with information on socioeconomic status, education, and health, the data can be used to examine linkages between income levels and dietary choices, as well as dietary patterns and health outcomes.

                      -

                      French, German, Japanese and Spanish translations of the SASB Standards are available. To download translations of the Standards, please select your industry(ies) and fill out the form.

                      -

                      -

                      You can download an Errata sheet [PDF - 145 KB] describing a minor typographical error identified in the original printing of the 2015-2020 Dietary Guidelines for Americans. This error has been corrected in the online versions.

                      -

                      When downloaded from the Mac App Store, Word, Excel, and PowerPoint require a Microsoft 365 subscription to create and edit documents. To send and receive email, Outlook requires a Microsoft 365 subscription. You can activate an existing Microsoft 365 subscription, or use the in-app purchase option to start a new subscription.

                      -

                      One-time Office licenses (also known as perpetual), such as Office 2019 Home & Business, Office 2019 Home & Student, Office 2016 Home & Business, Office 2016 Home & Student, and Volume License are not compatible with Office apps downloaded from the Mac App Store. These licenses can only be used with Office apps downloaded directly from Microsoft.

                      -

                      With the popularity of streaming platforms like Deezer, Apple Music, YouTube, Netflix, and Amazon Prime comes the question, will I save my data when I download or should I stream directly? The NCC advises you to download.

                      -

                      As Akosionu points out, if you watch ten minutes of a one-hour video on YouTube, you will only be charged for those ten minutes. However, with downloads, you have to wait for the video to download completely before you watch it.

                      -

                      Generally, more users download content online than they upload, so most network providers design the system to facilitate faster downloads. This then makes uploading content (e.g., an email attachment or a WhatsApp status) relatively slower than downloading it.

                      -

                      So if it takes two hours to download a 2GB movie on a 3G network, it might take just twenty minutes on a 4G network. So your 4G network will not use more than 2GB, but it will use it in twenty minutes instead of two hours on a 3G network.

                      -

                      Now, Opensignal has analyzed what this increase in capacity means for mobile users by looking at how much mobile data our 5G and 4G smartphone users consumed. In six leading 5G countries we found that 5G smartphone users on average consumed between 2.7 and 1.7 times more mobile data than 4G users. Usage was greatest in South Korea where our smartphone users reached 38.1 GB of mobile data used. In all six markets 5G users on average consumed more than 15 GB of mobile data.

                      -

                      DynamoDB calculates the number of read capacity units consumed based on item size, not on the amount of data that is returned to an application. For this reason, the number of capacity units consumed is the same whether you request all of the attributes (the default behavior) or just some of them (using a projection expression). The number is also the same whether or not you use a filter expression. Scan consumes a minimum read capacity unit (0.5 with default eventually consistent, 1.0 with strongly consistent) for each partition involved in servicing the request - this includes partitions which do not contain any items.

                      -

                      Patients taking MAOIs can overdose and may show similar side effects, as stated above, except with more severe presentation.[17] Anyone on MAOIs may experience symptoms slowly within the first 24 to 48 hours. However, symptoms can be nonspecific, ranging from mild to severe or even life-threatening. Depending on the MAOI prescribed, some can cause patients to go into a coma, and others (e.g., overdosing on tranylcypromine) can result in death.[9] The severity depends on the amount consumed and the type of MAOI the patient took. For example, phenelzine and tranylcypromine, being nonselective and irreversible, increase the risk of a patient experiencing a hypertensive crisis when ingested with tyramine. However, selegiline is a selective MAO-B inhibitor with less hypertensive risk.[4] Any patient experiencing any of the following: agitation, flushing, tachycardia, hypotension or hypertension, palpitations, twitching, increased deep tendon reflexes, seizures, or high fevers should immediately report to a health provider.[9]

                      -

                      The raw dataset is a group of several hundred thousand files, each containing the outputs of an individual building energy model, totaling 17 terabytes. Although processing these results using conventional desktop computing is impractical, several cloud service providers make the required computing power and querying technology available to those with the technical skill set. Additionally, some users may have in-house access to advanced computing resources or want to download a small subset of individual building load profiles for their own custom use cases. To facilitate these use cases, the raw individual building results, along with the corresponding building characteristics, have been published to a public website. They may be downloaded directly from this website or queried in place using big data technologies.

                      -

                      Autodesk provides download and install instructions for individuals and administrators. Your available downloads appear in Autodesk Account. Find your product, select a version, platform, language, and download method. For more information, visit the Autodesk Knowledge Network.

                      -

                      3ds Max is used to model, animate, and render detailed 3D characters, photorealistic designs, and complex scenes for film and TV, games, and design visualization projects. It is used by 3D modelers, animators, and lighting artists for game development, film and TV productions, and design visualization; 3ds Max and Maya are both used by creative studios for animation, modeling, visual effects, and rendering. Autodesk provides download and install instructions for individuals and administrators; available downloads appear in Autodesk Account, where you select a version, platform, language, and download method. With a subscription, 3ds Max can be installed on up to 3 computers or other devices, but only the named user can sign in and use the software on a single computer at any given time. A free trial can be converted to a paid subscription by launching the trial, clicking Subscribe Now, and entering the same email address and password used to sign in to the trial.


                    -

                    Remote procedure call (RPC) systems, including Java RMI, are synchronous -- the caller must block and wait until the called method completes execution, and thus offer no potential for developing loosely coupled enterprise applications without the use of multiple threads. In other words, RPC systems require the client and the server to be available at the same time. However, such tight coupling may not be possible or desired in some applications. Message-Oriented Middleware (MOM) systems provide solutions to such problems. They are based on the asynchronous interaction model, and provide the abstraction of a message queue that can be accessed across a network. Note, however, that messaging here refers to asynchronous requests or events that are consumed by enterprise applications and not humans as in electronic mail (email). These messages contain formatted data that describe specific business actions.

                    aaccfb2cb3
                    -
                    -
                    \ No newline at end of file diff --git a/spaces/vanessa9178/anime-anything-v4.0/app.py b/spaces/vanessa9178/anime-anything-v4.0/app.py deleted file mode 100644 index 47a2051db6dadeea03edf70d62694fd3e5e88ba7..0000000000000000000000000000000000000000 --- a/spaces/vanessa9178/anime-anything-v4.0/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/andite/anything-v4.0").launch() \ No newline at end of file diff --git a/spaces/wffcyrus/MetaGPT-v1/metagpt/document_store/qdrant_store.py b/spaces/wffcyrus/MetaGPT-v1/metagpt/document_store/qdrant_store.py deleted file mode 100644 index 98b82cf872ae0514487f88dbeadb7682da36f877..0000000000000000000000000000000000000000 --- a/spaces/wffcyrus/MetaGPT-v1/metagpt/document_store/qdrant_store.py +++ /dev/null @@ -1,129 +0,0 @@ -from dataclasses import dataclass -from typing import List - -from qdrant_client import QdrantClient -from qdrant_client.models import Filter, PointStruct, VectorParams - -from metagpt.document_store.base_store import BaseStore - - -@dataclass -class QdrantConnection: - """ - Args: - url: qdrant url - host: qdrant host - port: qdrant port - memory: qdrant service use memory mode - api_key: qdrant cloud api_key - """ - url: str = None - host: str = None - port: int = None - memory: bool = False - api_key: str = None - - -class QdrantStore(BaseStore): - def __init__(self, connect: QdrantConnection): - if connect.memory: - self.client = QdrantClient(":memory:") - elif connect.url: - self.client = QdrantClient(url=connect.url, api_key=connect.api_key) - elif connect.host and connect.port: - self.client = QdrantClient( - host=connect.host, port=connect.port, api_key=connect.api_key - ) - else: - raise Exception("please check QdrantConnection.") - - def create_collection( - self, - collection_name: str, - vectors_config: VectorParams, - force_recreate=False, - **kwargs, - ): - """ - create a collection - Args: - collection_name: collection name - vectors_config: VectorParams object,detail in https://github.com/qdrant/qdrant-client - force_recreate: default is False, if True, will delete exists collection,then create it - **kwargs: - - Returns: - - """ - try: - self.client.get_collection(collection_name) - if force_recreate: - res = self.client.recreate_collection( - collection_name, vectors_config=vectors_config, **kwargs - ) - return res - return True - except: # noqa: E722 - return self.client.recreate_collection( - collection_name, vectors_config=vectors_config, **kwargs - ) - - def has_collection(self, collection_name: str): - try: - self.client.get_collection(collection_name) - return True - except: # noqa: E722 - return False - - def delete_collection(self, collection_name: str, timeout=60): - res = self.client.delete_collection(collection_name, timeout=timeout) - if not res: - raise Exception(f"Delete collection {collection_name} failed.") - - def add(self, collection_name: str, points: List[PointStruct]): - """ - add some vector data to qdrant - Args: - collection_name: collection name - points: list of PointStruct object, about PointStruct detail in https://github.com/qdrant/qdrant-client - - Returns: NoneX - - """ - # self.client.upload_records() - self.client.upsert( - collection_name, - points, - ) - - def search( - self, - collection_name: str, - query: List[float], - query_filter: Filter = None, - k=10, - return_vector=False, - ): - """ - vector search - Args: - collection_name: qdrant collection name - query: input vector - query_filter: Filter object, detail in 
https://github.com/qdrant/qdrant-client - k: return the most similar k pieces of data - return_vector: whether return vector - - Returns: list of dict - - """ - hits = self.client.search( - collection_name=collection_name, - query_vector=query, - query_filter=query_filter, - limit=k, - with_vectors=return_vector, - ) - return [hit.__dict__ for hit in hits] - - def write(self, *args, **kwargs): - pass diff --git a/spaces/wffcyrus/MetaGPT-v1/metagpt/roles/researcher.py b/spaces/wffcyrus/MetaGPT-v1/metagpt/roles/researcher.py deleted file mode 100644 index cb4d28c339ad05700d106afa04579bbeb31c1863..0000000000000000000000000000000000000000 --- a/spaces/wffcyrus/MetaGPT-v1/metagpt/roles/researcher.py +++ /dev/null @@ -1,105 +0,0 @@ -#!/usr/bin/env python -""" -@Modified By: mashenquan, 2023/8/22. A definition has been provided for the return value of _think: returning false indicates that further reasoning cannot continue. - -""" - -import asyncio - -from pydantic import BaseModel - -from metagpt.actions import CollectLinks, ConductResearch, WebBrowseAndSummarize -from metagpt.actions.research import get_research_system_text -from metagpt.const import RESEARCH_PATH -from metagpt.logs import logger -from metagpt.roles import Role -from metagpt.schema import Message - - -class Report(BaseModel): - topic: str - links: dict[str, list[str]] = None - summaries: list[tuple[str, str]] = None - content: str = "" - - -class Researcher(Role): - def __init__( - self, - name: str = "David", - profile: str = "Researcher", - goal: str = "Gather information and conduct research", - constraints: str = "Ensure accuracy and relevance of information", - language: str = "en-us", - **kwargs, - ): - super().__init__(name, profile, goal, constraints, **kwargs) - self._init_actions([CollectLinks(name), WebBrowseAndSummarize(name), ConductResearch(name)]) - self.language = language - if language not in ("en-us", "zh-cn"): - logger.warning(f"The language `{language}` has not been tested, it may not work.") - - async def _think(self) -> bool: - if self._rc.todo is None: - self._set_state(0) - return True - - if self._rc.state + 1 < len(self._states): - self._set_state(self._rc.state + 1) - else: - self._rc.todo = None - return False - - async def _act(self) -> Message: - logger.info(f"{self._setting}: ready to {self._rc.todo}") - todo = self._rc.todo - msg = self._rc.memory.get(k=1)[0] - if isinstance(msg.instruct_content, Report): - instruct_content = msg.instruct_content - topic = instruct_content.topic - else: - topic = msg.content - - research_system_text = get_research_system_text(topic, self.language) - if isinstance(todo, CollectLinks): - links = await todo.run(topic, 4, 4) - ret = Message("", Report(topic=topic, links=links), role=self.profile, cause_by=type(todo)) - elif isinstance(todo, WebBrowseAndSummarize): - links = instruct_content.links - todos = (todo.run(*url, query=query, system_text=research_system_text) for (query, url) in links.items()) - summaries = await asyncio.gather(*todos) - summaries = list((url, summary) for i in summaries for (url, summary) in i.items() if summary) - ret = Message("", Report(topic=topic, summaries=summaries), role=self.profile, cause_by=type(todo)) - else: - summaries = instruct_content.summaries - summary_text = "\n---\n".join(f"url: {url}\nsummary: {summary}" for (url, summary) in summaries) - content = await self._rc.todo.run(topic, summary_text, system_text=research_system_text) - ret = Message("", Report(topic=topic, content=content), role=self.profile, 
cause_by=type(self._rc.todo)) - self._rc.memory.add(ret) - return ret - - async def _react(self) -> Message: - while True: - await self._think() - if self._rc.todo is None: - break - msg = await self._act() - report = msg.instruct_content - self.write_report(report.topic, report.content) - return msg - - def write_report(self, topic: str, content: str): - if not RESEARCH_PATH.exists(): - RESEARCH_PATH.mkdir(parents=True) - filepath = RESEARCH_PATH / f"{topic}.md" - filepath.write_text(content) - - -if __name__ == "__main__": - import fire - - async def main(topic: str, language="en-us"): - role = Researcher(topic, language=language) - await role.run(topic) - - fire.Fire(main) diff --git a/spaces/wgpubs/fastai_2022_session1_is_marvel_character/README.md b/spaces/wgpubs/fastai_2022_session1_is_marvel_character/README.md deleted file mode 100644 index 1226db9623020acd5e9279e36cecbebda0e2abb1..0000000000000000000000000000000000000000 --- a/spaces/wgpubs/fastai_2022_session1_is_marvel_character/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Is it a Marvel Character? -emoji: 🦸🦸‍♀️ 🦹🦹‍♀️ -colorFrom: yellow -colorTo: pink -sdk: gradio -sdk_version: 2.9.4 -app_file: app.py -pinned: false -license: wtfpl ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/whgwd2023/bingo/src/app/loading.css b/spaces/whgwd2023/bingo/src/app/loading.css deleted file mode 100644 index eaaab6a86a228334c4eca3c5368ae6f0f593d405..0000000000000000000000000000000000000000 --- a/spaces/whgwd2023/bingo/src/app/loading.css +++ /dev/null @@ -1,68 +0,0 @@ -::-webkit-scrollbar { - width: 10px; - height: 10px; - display: none; -} - -::-webkit-scrollbar-button:start:decrement, -::-webkit-scrollbar-button:end:increment { - height: 30px; - background-color: transparent; -} - -::-webkit-scrollbar-track-piece { - background-color: #3b3b3b; - -webkit-border-radius: 16px; -} - -::-webkit-scrollbar-thumb:vertical { - height: 50px; - background-color: #666; - border: 1px solid #eee; - -webkit-border-radius: 6px; -} - -/* loading start */ -.loading-spinner { - display: flex; - justify-content: center; - align-items: center; - height: 100vh; - opacity: 1; - transition: opacity .8s ease-out; -} - -.loading-spinner.hidden { - opacity: 0; -} - -.loading-spinner>div { - width: 30px; - height: 30px; - background: linear-gradient(90deg, #2870EA 10.79%, #1B4AEF 87.08%); - - border-radius: 100%; - display: inline-block; - animation: sk-bouncedelay 1.4s infinite ease-in-out both; -} - -.loading-spinner .bounce1 { - animation-delay: -0.32s; -} - -.loading-spinner .bounce2 { - animation-delay: -0.16s; -} - -@keyframes sk-bouncedelay { - - 0%, - 80%, - 100% { - transform: scale(0); - } - - 40% { - transform: scale(1.0); - } -} diff --git a/spaces/wong26/faster-whisper-webui/src/config.py b/spaces/wong26/faster-whisper-webui/src/config.py deleted file mode 100644 index bd2b51478c39ce91fa55e2a8d801d9a7cf6d662e..0000000000000000000000000000000000000000 --- a/spaces/wong26/faster-whisper-webui/src/config.py +++ /dev/null @@ -1,154 +0,0 @@ -from enum import Enum -import urllib - -import os -from typing import List -from urllib.parse import urlparse -import json5 -import torch - -from tqdm import tqdm - -class ModelConfig: - def __init__(self, name: str, url: str, path: str = None, type: str = "whisper"): - """ - Initialize a model configuration. - - name: Name of the model - url: URL to download the model from - path: Path to the model file. 
If not set, the model will be downloaded from the URL. - type: Type of model. Can be whisper or huggingface. - """ - self.name = name - self.url = url - self.path = path - self.type = type - -VAD_INITIAL_PROMPT_MODE_VALUES=["prepend_all_segments", "prepend_first_segment", "json_prompt_mode"] - -class VadInitialPromptMode(Enum): - PREPEND_ALL_SEGMENTS = 1 - PREPREND_FIRST_SEGMENT = 2 - JSON_PROMPT_MODE = 3 - - @staticmethod - def from_string(s: str): - normalized = s.lower() if s is not None else None - - if normalized == "prepend_all_segments": - return VadInitialPromptMode.PREPEND_ALL_SEGMENTS - elif normalized == "prepend_first_segment": - return VadInitialPromptMode.PREPREND_FIRST_SEGMENT - elif normalized == "json_prompt_mode": - return VadInitialPromptMode.JSON_PROMPT_MODE - elif normalized is not None and normalized != "": - raise ValueError(f"Invalid value for VadInitialPromptMode: {s}") - else: - return None - -class ApplicationConfig: - def __init__(self, models: List[ModelConfig] = [], input_audio_max_duration: int = 600, - share: bool = False, server_name: str = None, server_port: int = 7860, - queue_concurrency_count: int = 1, delete_uploaded_files: bool = True, - whisper_implementation: str = "whisper", - default_model_name: str = "medium", default_vad: str = "silero-vad", - vad_parallel_devices: str = "", vad_cpu_cores: int = 1, vad_process_timeout: int = 1800, - auto_parallel: bool = False, output_dir: str = None, - model_dir: str = None, device: str = None, - verbose: bool = True, task: str = "transcribe", language: str = None, - vad_initial_prompt_mode: str = "prepend_first_segment ", - vad_merge_window: float = 5, vad_max_merge_size: float = 30, - vad_padding: float = 1, vad_prompt_window: float = 3, - temperature: float = 0, best_of: int = 5, beam_size: int = 5, - patience: float = None, length_penalty: float = None, - suppress_tokens: str = "-1", initial_prompt: str = None, - condition_on_previous_text: bool = True, fp16: bool = True, - compute_type: str = "float16", - temperature_increment_on_fallback: float = 0.2, compression_ratio_threshold: float = 2.4, - logprob_threshold: float = -1.0, no_speech_threshold: float = 0.6, - # Word timestamp settings - word_timestamps: bool = False, prepend_punctuations: str = "\"\'“¿([{-", - append_punctuations: str = "\"\'.。,,!!??::”)]}、", - highlight_words: bool = False): - - self.models = models - - # WebUI settings - self.input_audio_max_duration = input_audio_max_duration - self.share = share - self.server_name = server_name - self.server_port = server_port - self.queue_concurrency_count = queue_concurrency_count - self.delete_uploaded_files = delete_uploaded_files - - self.whisper_implementation = whisper_implementation - self.default_model_name = default_model_name - self.default_vad = default_vad - self.vad_parallel_devices = vad_parallel_devices - self.vad_cpu_cores = vad_cpu_cores - self.vad_process_timeout = vad_process_timeout - self.auto_parallel = auto_parallel - self.output_dir = output_dir - - self.model_dir = model_dir - self.device = device - self.verbose = verbose - self.task = task - self.language = language - self.vad_initial_prompt_mode = vad_initial_prompt_mode - self.vad_merge_window = vad_merge_window - self.vad_max_merge_size = vad_max_merge_size - self.vad_padding = vad_padding - self.vad_prompt_window = vad_prompt_window - self.temperature = temperature - self.best_of = best_of - self.beam_size = beam_size - self.patience = patience - self.length_penalty = length_penalty - self.suppress_tokens = 
suppress_tokens - self.initial_prompt = initial_prompt - self.condition_on_previous_text = condition_on_previous_text - self.fp16 = fp16 - self.compute_type = compute_type - self.temperature_increment_on_fallback = temperature_increment_on_fallback - self.compression_ratio_threshold = compression_ratio_threshold - self.logprob_threshold = logprob_threshold - self.no_speech_threshold = no_speech_threshold - - # Word timestamp settings - self.word_timestamps = word_timestamps - self.prepend_punctuations = prepend_punctuations - self.append_punctuations = append_punctuations - self.highlight_words = highlight_words - - def get_model_names(self): - return [ x.name for x in self.models ] - - def update(self, **new_values): - result = ApplicationConfig(**self.__dict__) - - for key, value in new_values.items(): - setattr(result, key, value) - return result - - @staticmethod - def create_default(**kwargs): - app_config = ApplicationConfig.parse_file(os.environ.get("WHISPER_WEBUI_CONFIG", "config.json5")) - - # Update with kwargs - if len(kwargs) > 0: - app_config = app_config.update(**kwargs) - return app_config - - @staticmethod - def parse_file(config_path: str): - import json5 - - with open(config_path, "r", encoding="utf-8") as f: - # Load using json5 - data = json5.load(f) - data_models = data.pop("models", []) - - models = [ ModelConfig(**x) for x in data_models ] - - return ApplicationConfig(models, **data) diff --git a/spaces/xp3857/Image_Restoration_Colorization/Global/options/train_options.py b/spaces/xp3857/Image_Restoration_Colorization/Global/options/train_options.py deleted file mode 100644 index 6cc3296657043568a3a961d793f2c69f568bab1a..0000000000000000000000000000000000000000 --- a/spaces/xp3857/Image_Restoration_Colorization/Global/options/train_options.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from .base_options import BaseOptions - -class TrainOptions(BaseOptions): - def initialize(self): - BaseOptions.initialize(self) - # for displays - self.parser.add_argument('--display_freq', type=int, default=100, help='frequency of showing training results on screen') - self.parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console') - self.parser.add_argument('--save_latest_freq', type=int, default=10000, help='frequency of saving the latest results') - self.parser.add_argument('--save_epoch_freq', type=int, default=1, help='frequency of saving checkpoints at the end of epochs') - self.parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/') - self.parser.add_argument('--debug', action='store_true', help='only do one epoch and displays at each iteration') - - # for training - self.parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model') - # self.parser.add_argument('--load_pretrain', type=str, default='', help='load the pretrained model from the specified location') - self.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? 
set to latest to use latest cached model') - self.parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc') - self.parser.add_argument('--niter', type=int, default=100, help='# of iter at starting learning rate') - self.parser.add_argument('--niter_decay', type=int, default=100, help='# of iter to linearly decay learning rate to zero') - self.parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam') - self.parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam') - self.parser.add_argument('--training_dataset',type=str,default='',help='training use which dataset') - - # for discriminators - self.parser.add_argument('--num_D', type=int, default=2, help='number of discriminators to use') - self.parser.add_argument('--n_layers_D', type=int, default=3, help='only used if which_model_netD==n_layers') - self.parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer') - self.parser.add_argument('--lambda_feat', type=float, default=10.0, help='weight for feature matching loss') - self.parser.add_argument('--l2_feat', type=float, help='weight for feature mapping loss') - self.parser.add_argument('--use_l1_feat', action='store_true', help='use l1 for feat mapping') - self.parser.add_argument('--no_ganFeat_loss', action='store_true', help='if specified, do *not* use discriminator feature matching loss') - self.parser.add_argument('--no_vgg_loss', action='store_true', help='if specified, do *not* use VGG feature matching loss') - self.parser.add_argument('--no_lsgan', action='store_true', help='do *not* use least square GAN, if false, use vanilla GAN') - self.parser.add_argument('--gan_type', type=str, default='lsgan', help='Choose the loss type of GAN') - self.parser.add_argument('--pool_size', type=int, default=0, help='the size of image buffer that stores previously generated images') - self.parser.add_argument('--norm_D',type=str, default='spectralinstance', help='instance normalization or batch normalization') - self.parser.add_argument('--init_D',type=str,default='xavier',help='normal|xavier|xavier_uniform|kaiming|orthogonal|none') - - self.parser.add_argument('--no_TTUR',action='store_true',help='No TTUR') - - self.parser.add_argument('--start_epoch',type=int,default=-1,help='write the start_epoch of iter.txt into this parameter') - self.parser.add_argument('--no_degradation',action='store_true',help='when train the mapping, enable this parameter --> no degradation will be added into clean image') - self.parser.add_argument('--no_load_VAE',action='store_true',help='when train the mapping, enable this parameter --> random initialize the encoder an decoder') - self.parser.add_argument('--use_v2_degradation',action='store_true',help='enable this parameter --> 4 kinds of degradations will be used to synthesize corruption') - self.parser.add_argument('--use_vae_which_epoch',type=str,default='200') - - - self.parser.add_argument('--use_focal_loss',action='store_true') - - self.parser.add_argument('--mask_need_scale',action='store_true',help='enable this param means that the pixel range of mask is 0-255') - self.parser.add_argument('--positive_weight',type=float,default=1.0,help='(For scratch detection) Since the scratch number is less, and we use a weight strategy. 
This parameter means that we want to decrease the weight.') - - self.parser.add_argument('--no_update_lr',action='store_true',help='use this means we do not update the LR while training') - - - self.isTrain = True diff --git a/spaces/xuxw98/TAPA/README.md b/spaces/xuxw98/TAPA/README.md deleted file mode 100644 index f44983e5ae1e542bd22f607741c2e73dfbe0879b..0000000000000000000000000000000000000000 --- a/spaces/xuxw98/TAPA/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: TAPA -emoji: 👀 -colorFrom: yellow -colorTo: pink -sdk: gradio -sdk_version: 3.34.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/xxccc/gpt-academic/request_llm/bridge_chatglm.py b/spaces/xxccc/gpt-academic/request_llm/bridge_chatglm.py deleted file mode 100644 index 100783d248c4cd6dcbdb091181ac21f0f66af670..0000000000000000000000000000000000000000 --- a/spaces/xxccc/gpt-academic/request_llm/bridge_chatglm.py +++ /dev/null @@ -1,161 +0,0 @@ - -from transformers import AutoModel, AutoTokenizer -import time -import threading -import importlib -from toolbox import update_ui, get_conf -from multiprocessing import Process, Pipe - -load_message = "ChatGLM尚未加载,加载需要一段时间。注意,取决于`config.py`的配置,ChatGLM消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……" - -################################################################################# -class GetGLMHandle(Process): - def __init__(self): - super().__init__(daemon=True) - self.parent, self.child = Pipe() - self.chatglm_model = None - self.chatglm_tokenizer = None - self.info = "" - self.success = True - self.check_dependency() - self.start() - self.threadLock = threading.Lock() - - def check_dependency(self): - try: - import sentencepiece - self.info = "依赖检测通过" - self.success = True - except: - self.info = "缺少ChatGLM的依赖,如果要使用ChatGLM,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_chatglm.txt`安装ChatGLM的依赖。" - self.success = False - - def ready(self): - return self.chatglm_model is not None - - def run(self): - # 子进程执行 - # 第一次运行,加载参数 - retry = 0 - while True: - try: - if self.chatglm_model is None: - self.chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) - device, = get_conf('LOCAL_MODEL_DEVICE') - if device=='cpu': - self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float() - else: - self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda() - self.chatglm_model = self.chatglm_model.eval() - break - else: - break - except: - retry += 1 - if retry > 3: - self.child.send('[Local Message] Call ChatGLM fail 不能正常加载ChatGLM的参数。') - raise RuntimeError("不能正常加载ChatGLM的参数!") - - while True: - # 进入任务等待状态 - kwargs = self.child.recv() - # 收到消息,开始请求 - try: - for response, history in self.chatglm_model.stream_chat(self.chatglm_tokenizer, **kwargs): - self.child.send(response) - # # 中途接收可能的终止指令(如果有的话) - # if self.child.poll(): - # command = self.child.recv() - # if command == '[Terminate]': break - except: - from toolbox import trimmed_format_exc - self.child.send('[Local Message] Call ChatGLM fail.' 
+ '\n```\n' + trimmed_format_exc() + '\n```\n') - # 请求处理结束,开始下一个循环 - self.child.send('[Finish]') - - def stream_chat(self, **kwargs): - # 主进程执行 - self.threadLock.acquire() - self.parent.send(kwargs) - while True: - res = self.parent.recv() - if res != '[Finish]': - yield res - else: - break - self.threadLock.release() - -global glm_handle -glm_handle = None -################################################################################# -def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False): - """ - 多线程方法 - 函数的说明请见 request_llm/bridge_all.py - """ - global glm_handle - if glm_handle is None: - glm_handle = GetGLMHandle() - if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + glm_handle.info - if not glm_handle.success: - error = glm_handle.info - glm_handle = None - raise RuntimeError(error) - - # chatglm 没有 sys_prompt 接口,因此把prompt加入 history - history_feedin = [] - history_feedin.append(["What can I do?", sys_prompt]) - for i in range(len(history)//2): - history_feedin.append([history[2*i], history[2*i+1]] ) - - watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可 - response = "" - for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): - if len(observe_window) >= 1: observe_window[0] = response - if len(observe_window) >= 2: - if (time.time()-observe_window[1]) > watch_dog_patience: - raise RuntimeError("程序终止。") - return response - - - -def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): - """ - 单线程方法 - 函数的说明请见 request_llm/bridge_all.py - """ - chatbot.append((inputs, "")) - - global glm_handle - if glm_handle is None: - glm_handle = GetGLMHandle() - chatbot[-1] = (inputs, load_message + "\n\n" + glm_handle.info) - yield from update_ui(chatbot=chatbot, history=[]) - if not glm_handle.success: - glm_handle = None - return - - if additional_fn is not None: - import core_functional - importlib.reload(core_functional) # 热更新prompt - core_functional = core_functional.get_core_functions() - if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs) # 获取预处理函数(如果有的话) - inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"] - - # 处理历史信息 - history_feedin = [] - history_feedin.append(["What can I do?", system_prompt] ) - for i in range(len(history)//2): - history_feedin.append([history[2*i], history[2*i+1]] ) - - # 开始接收chatglm的回复 - response = "[Local Message]: 等待ChatGLM响应中 ..." - for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): - chatbot[-1] = (inputs, response) - yield from update_ui(chatbot=chatbot, history=history) - - # 总结输出 - if response == "[Local Message]: 等待ChatGLM响应中 ...": - response = "[Local Message]: ChatGLM响应异常 ..." 
- history.extend([inputs, response]) - yield from update_ui(chatbot=chatbot, history=history) diff --git a/spaces/ygangang/VToonify/vtoonify/model/dualstylegan.py b/spaces/ygangang/VToonify/vtoonify/model/dualstylegan.py deleted file mode 100644 index 60d9850ad049a2751781871d6ae0c2779ecc863f..0000000000000000000000000000000000000000 --- a/spaces/ygangang/VToonify/vtoonify/model/dualstylegan.py +++ /dev/null @@ -1,203 +0,0 @@ -import random -import torch -from torch import nn -from model.stylegan.model import ConvLayer, PixelNorm, EqualLinear, Generator - -class AdaptiveInstanceNorm(nn.Module): - def __init__(self, fin, style_dim=512): - super().__init__() - - self.norm = nn.InstanceNorm2d(fin, affine=False) - self.style = nn.Linear(style_dim, fin * 2) - - self.style.bias.data[:fin] = 1 - self.style.bias.data[fin:] = 0 - - def forward(self, input, style): - style = self.style(style).unsqueeze(2).unsqueeze(3) - gamma, beta = style.chunk(2, 1) - out = self.norm(input) - out = gamma * out + beta - return out - -# modulative residual blocks (ModRes) -class AdaResBlock(nn.Module): - def __init__(self, fin, style_dim=512, dilation=1): # modified - super().__init__() - - self.conv = ConvLayer(fin, fin, 3, dilation=dilation) # modified - self.conv2 = ConvLayer(fin, fin, 3, dilation=dilation) # modified - self.norm = AdaptiveInstanceNorm(fin, style_dim) - self.norm2 = AdaptiveInstanceNorm(fin, style_dim) - - # model initialization - # the convolution filters are set to values close to 0 to produce negligible residual features - self.conv[0].weight.data *= 0.01 - self.conv2[0].weight.data *= 0.01 - - def forward(self, x, s, w=1): - skip = x - if w == 0: - return skip - out = self.conv(self.norm(x, s)) - out = self.conv2(self.norm2(out, s)) - out = out * w + skip - return out - -class DualStyleGAN(nn.Module): - def __init__(self, size, style_dim, n_mlp, channel_multiplier=2, twoRes=True, res_index=6): - super().__init__() - - layers = [PixelNorm()] - for i in range(n_mlp-6): - layers.append(EqualLinear(512, 512, lr_mul=0.01, activation="fused_lrelu")) - # color transform blocks T_c - self.style = nn.Sequential(*layers) - # StyleGAN2 - self.generator = Generator(size, style_dim, n_mlp, channel_multiplier) - # The extrinsic style path - self.res = nn.ModuleList() - self.res_index = res_index//2 * 2 - self.res.append(AdaResBlock(self.generator.channels[2 ** 2])) # for conv1 - for i in range(3, self.generator.log_size + 1): - out_channel = self.generator.channels[2 ** i] - if i < 3 + self.res_index//2: - # ModRes - self.res.append(AdaResBlock(out_channel)) - self.res.append(AdaResBlock(out_channel)) - else: - # structure transform block T_s - self.res.append(EqualLinear(512, 512)) - # FC layer is initialized with identity matrices, meaning no changes to the input latent code - self.res[-1].weight.data = torch.eye(512) * 512.0**0.5 + torch.randn(512, 512) * 0.01 - self.res.append(EqualLinear(512, 512)) - self.res[-1].weight.data = torch.eye(512) * 512.0**0.5 + torch.randn(512, 512) * 0.01 - self.res.append(EqualLinear(512, 512)) # for to_rgb7 - self.res[-1].weight.data = torch.eye(512) * 512.0**0.5 + torch.randn(512, 512) * 0.01 - self.size = self.generator.size - self.style_dim = self.generator.style_dim - self.log_size = self.generator.log_size - self.num_layers = self.generator.num_layers - self.n_latent = self.generator.n_latent - self.channels = self.generator.channels - - def forward( - self, - styles, # intrinsic style code - exstyles, # extrinsic style code - return_latents=False, - 
return_feat=False, - inject_index=None, - truncation=1, - truncation_latent=None, - input_is_latent=False, - noise=None, - randomize_noise=True, - z_plus_latent=False, # intrinsic style code is z+ or z - use_res=True, # whether to use the extrinsic style path - fuse_index=18, # layers > fuse_index do not use the extrinsic style path - interp_weights=[1]*18, # weight vector for style combination of two paths - ): - - if not input_is_latent: - if not z_plus_latent: - styles = [self.generator.style(s) for s in styles] - else: - styles = [self.generator.style(s.reshape(s.shape[0]*s.shape[1], s.shape[2])).reshape(s.shape) for s in styles] - - if noise is None: - if randomize_noise: - noise = [None] * self.generator.num_layers - else: - noise = [ - getattr(self.generator.noises, f"noise_{i}") for i in range(self.generator.num_layers) - ] - - if truncation < 1: - style_t = [] - - for style in styles: - style_t.append( - truncation_latent + truncation * (style - truncation_latent) - ) - - styles = style_t - - if len(styles) < 2: - inject_index = self.generator.n_latent - - if styles[0].ndim < 3: - latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1) - - else: - latent = styles[0] - - else: - if inject_index is None: - inject_index = random.randint(1, self.generator.n_latent - 1) - - if styles[0].ndim < 3: - latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1) - latent2 = styles[1].unsqueeze(1).repeat(1, self.generator.n_latent - inject_index, 1) - - latent = torch.cat([latent, latent2], 1) - else: - latent = torch.cat([styles[0][:,0:inject_index], styles[1][:,inject_index:]], 1) - - if use_res: - if exstyles.ndim < 3: - resstyles = self.style(exstyles).unsqueeze(1).repeat(1, self.generator.n_latent, 1) - adastyles = exstyles.unsqueeze(1).repeat(1, self.generator.n_latent, 1) - else: - nB, nL, nD = exstyles.shape - resstyles = self.style(exstyles.reshape(nB*nL, nD)).reshape(nB, nL, nD) - adastyles = exstyles - - out = self.generator.input(latent) - out = self.generator.conv1(out, latent[:, 0], noise=noise[0]) - if use_res and fuse_index > 0: - out = self.res[0](out, resstyles[:, 0], interp_weights[0]) - - skip = self.generator.to_rgb1(out, latent[:, 1]) - i = 1 - for conv1, conv2, noise1, noise2, to_rgb in zip( - self.generator.convs[::2], self.generator.convs[1::2], noise[1::2], noise[2::2], self.generator.to_rgbs): - if use_res and fuse_index >= i and i > self.res_index: - out = conv1(out, interp_weights[i] * self.res[i](adastyles[:, i]) + - (1-interp_weights[i]) * latent[:, i], noise=noise1) - else: - out = conv1(out, latent[:, i], noise=noise1) - if use_res and fuse_index >= i and i <= self.res_index: - out = self.res[i](out, resstyles[:, i], interp_weights[i]) - if use_res and fuse_index >= (i+1) and i > self.res_index: - out = conv2(out, interp_weights[i+1] * self.res[i+1](adastyles[:, i+1]) + - (1-interp_weights[i+1]) * latent[:, i+1], noise=noise2) - else: - out = conv2(out, latent[:, i + 1], noise=noise2) - if use_res and fuse_index >= (i+1) and i <= self.res_index: - out = self.res[i+1](out, resstyles[:, i+1], interp_weights[i+1]) - if use_res and fuse_index >= (i+2) and i >= self.res_index-1: - skip = to_rgb(out, interp_weights[i+2] * self.res[i+2](adastyles[:, i+2]) + - (1-interp_weights[i+2]) * latent[:, i + 2], skip) - else: - skip = to_rgb(out, latent[:, i + 2], skip) - i += 2 - if i > self.res_index and return_feat: - return out, skip - - image = skip - - if return_latents: - return image, latent - - else: - return image, None - - def make_noise(self): - return 
self.generator.make_noise() - - def mean_latent(self, n_latent): - return self.generator.mean_latent(n_latent) - - def get_latent(self, input): - return self.generator.style(input) \ No newline at end of file diff --git a/spaces/yl12053/so-vits-4.1-Grass-Wonder/cluster/kmeans.py b/spaces/yl12053/so-vits-4.1-Grass-Wonder/cluster/kmeans.py deleted file mode 100644 index 6111ea45e66a15d41b5b904be6f75affd3c4369f..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Grass-Wonder/cluster/kmeans.py +++ /dev/null @@ -1,201 +0,0 @@ -import math,pdb -import torch,pynvml -from torch.nn.functional import normalize -from time import time -import numpy as np -# device=torch.device("cuda:0") -def _kpp(data: torch.Tensor, k: int, sample_size: int = -1): - """ Picks k points in the data based on the kmeans++ method. - - Parameters - ---------- - data : torch.Tensor - Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D - data, rank 2 multidimensional data, in which case one - row is one observation. - k : int - Number of samples to generate. - sample_size : int - sample data to avoid memory overflow during calculation - - Returns - ------- - init : ndarray - A 'k' by 'N' containing the initial centroids. - - References - ---------- - .. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of - careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM Symposium - on Discrete Algorithms, 2007. - .. [2] scipy/cluster/vq.py: _kpp - """ - batch_size=data.shape[0] - if batch_size>sample_size: - data = data[torch.randint(0, batch_size,[sample_size], device=data.device)] - dims = data.shape[1] if len(data.shape) > 1 else 1 - init = torch.zeros((k, dims)).to(data.device) - r = torch.distributions.uniform.Uniform(0, 1) - for i in range(k): - if i == 0: - init[i, :] = data[torch.randint(data.shape[0], [1])] - else: - D2 = torch.cdist(init[:i, :][None, :], data[None, :], p=2)[0].amin(dim=0) - probs = D2 / torch.sum(D2) - cumprobs = torch.cumsum(probs, dim=0) - init[i, :] = data[torch.searchsorted(cumprobs, r.sample([1]).to(data.device))] - return init -class KMeansGPU: - ''' - Kmeans clustering algorithm implemented with PyTorch - - Parameters: - n_clusters: int, - Number of clusters - - max_iter: int, default: 100 - Maximum number of iterations - - tol: float, default: 0.0001 - Tolerance - - verbose: int, default: 0 - Verbosity - - mode: {'euclidean', 'cosine'}, default: 'euclidean' - Type of distance measure - - init_method: {'random', 'point', '++'} - Type of initialization - - minibatch: {None, int}, default: None - Batch size of MinibatchKmeans algorithm - if None perform full KMeans algorithm - - Attributes: - centroids: torch.Tensor, shape: [n_clusters, n_features] - cluster centroids - ''' - def __init__(self, n_clusters, max_iter=200, tol=1e-4, verbose=0, mode="euclidean",device=torch.device("cuda:0")): - self.n_clusters = n_clusters - self.max_iter = max_iter - self.tol = tol - self.verbose = verbose - self.mode = mode - self.device=device - pynvml.nvmlInit() - gpu_handle = pynvml.nvmlDeviceGetHandleByIndex(device.index) - info = pynvml.nvmlDeviceGetMemoryInfo(gpu_handle) - self.minibatch=int(33e6/self.n_clusters*info.free/ 1024 / 1024 / 1024) - print("free_mem/GB:",info.free/ 1024 / 1024 / 1024,"minibatch:",self.minibatch) - - @staticmethod - def cos_sim(a, b): - """ - Compute cosine similarity of 2 sets of vectors - - Parameters: - a: torch.Tensor, shape: [m, n_features] - - b: torch.Tensor, shape: [n, n_features] - """ - return normalize(a, dim=-1) @ 
normalize(b, dim=-1).transpose(-2, -1) - - @staticmethod - def euc_sim(a, b): - """ - Compute euclidean similarity of 2 sets of vectors - Parameters: - a: torch.Tensor, shape: [m, n_features] - b: torch.Tensor, shape: [n, n_features] - """ - return 2 * a @ b.transpose(-2, -1) -(a**2).sum(dim=1)[..., :, None] - (b**2).sum(dim=1)[..., None, :] - - def max_sim(self, a, b): - """ - Compute maximum similarity (or minimum distance) of each vector - in a with all of the vectors in b - Parameters: - a: torch.Tensor, shape: [m, n_features] - b: torch.Tensor, shape: [n, n_features] - """ - if self.mode == 'cosine': - sim_func = self.cos_sim - elif self.mode == 'euclidean': - sim_func = self.euc_sim - sim = sim_func(a, b) - max_sim_v, max_sim_i = sim.max(dim=-1) - return max_sim_v, max_sim_i - - def fit_predict(self, X): - """ - Combination of fit() and predict() methods. - This is faster than calling fit() and predict() seperately. - Parameters: - X: torch.Tensor, shape: [n_samples, n_features] - centroids: {torch.Tensor, None}, default: None - if given, centroids will be initialized with given tensor - if None, centroids will be randomly chosen from X - Return: - labels: torch.Tensor, shape: [n_samples] - - mini_=33kk/k*remain - mini=min(mini_,fea_shape) - offset=log2(k/1000)*1.5 - kpp_all=min(mini_*10/offset,fea_shape) - kpp_sample=min(mini_/12/offset,fea_shape) - """ - assert isinstance(X, torch.Tensor), "input must be torch.Tensor" - assert X.dtype in [torch.half, torch.float, torch.double], "input must be floating point" - assert X.ndim == 2, "input must be a 2d tensor with shape: [n_samples, n_features] " - # print("verbose:%s"%self.verbose) - - offset = np.power(1.5,np.log(self.n_clusters / 1000))/np.log(2) - with torch.no_grad(): - batch_size= X.shape[0] - # print(self.minibatch, int(self.minibatch * 10 / offset), batch_size) - start_time = time() - if (self.minibatch*10//offset< batch_size): - x = X[torch.randint(0, batch_size,[int(self.minibatch*10/offset)])].to(self.device) - else: - x = X.to(self.device) - # print(x.device) - self.centroids = _kpp(x, self.n_clusters, min(int(self.minibatch/12/offset),batch_size)) - del x - torch.cuda.empty_cache() - # self.centroids = self.centroids.to(self.device) - num_points_in_clusters = torch.ones(self.n_clusters, device=self.device, dtype=X.dtype)#全1 - closest = None#[3098036]#int64 - if(self.minibatch>=batch_size//2 and self.minibatch=batch_size): - X=X.to(self.device) - for i in range(self.max_iter): - iter_time = time() - if self.minibatch= 2: - print('iter:', i, 'error:', error.item(), 'time spent:', round(time()-iter_time, 4)) - if error <= self.tol: - break - - if self.verbose >= 1: - print(f'used {i+1} iterations ({round(time()-start_time, 4)}s) to cluster {batch_size} items into {self.n_clusters} clusters') - return closest diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/data/transforms/custom_transform.py b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/data/transforms/custom_transform.py deleted file mode 100644 index 654d65d97d90a66e45b414bb878b13ba9f64e70a..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/data/transforms/custom_transform.py +++ /dev/null @@ -1,94 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved -# Modified by Xingyi Zhou -# File: transform.py - -import numpy as np -import torch -import torch.nn.functional as F -from fvcore.transforms.transform import ( - CropTransform, - HFlipTransform, - NoOpTransform, - Transform, - TransformList, -) -from PIL import Image - -try: - import cv2 # noqa -except ImportError: - # OpenCV is an optional dependency at the moment - pass - -__all__ = [ - "EfficientDetResizeCropTransform", -] - - -class EfficientDetResizeCropTransform(Transform): - """ - """ - - def __init__(self, scaled_h, scaled_w, offset_y, offset_x, img_scale, target_size, interp=None): - """ - Args: - h, w (int): original image size - new_h, new_w (int): new image size - interp: PIL interpolation methods, defaults to bilinear. - """ - # TODO decide on PIL vs opencv - super().__init__() - if interp is None: - interp = Image.BILINEAR - self._set_attributes(locals()) - - def apply_image(self, img, interp=None): - # assert img.shape[:2] == (self.h, self.w) - assert len(img.shape) <= 4 - - if img.dtype == np.uint8: - pil_image = Image.fromarray(img) - interp_method = interp if interp is not None else self.interp - pil_image = pil_image.resize((self.scaled_w, self.scaled_h), interp_method) - ret = np.asarray(pil_image) - right = min(self.scaled_w, self.offset_x + self.target_size[1]) - lower = min(self.scaled_h, self.offset_y + self.target_size[0]) - # img = img.crop((self.offset_x, self.offset_y, right, lower)) - if len(ret.shape) <= 3: - ret = ret[self.offset_y: lower, self.offset_x: right] - else: - ret = ret[..., self.offset_y: lower, self.offset_x: right, :] - else: - # PIL only supports uint8 - img = torch.from_numpy(img) - shape = list(img.shape) - shape_4d = shape[:2] + [1] * (4 - len(shape)) + shape[2:] - img = img.view(shape_4d).permute(2, 3, 0, 1) # hw(c) -> nchw - _PIL_RESIZE_TO_INTERPOLATE_MODE = {Image.BILINEAR: "bilinear", Image.BICUBIC: "bicubic"} - mode = _PIL_RESIZE_TO_INTERPOLATE_MODE[self.interp] - img = F.interpolate(img, (self.scaled_h, self.scaled_w), mode=mode, align_corners=False) - shape[:2] = (self.scaled_h, self.scaled_w) - ret = img.permute(2, 3, 0, 1).view(shape).numpy() # nchw -> hw(c) - right = min(self.scaled_w, self.offset_x + self.target_size[1]) - lower = min(self.scaled_h, self.offset_y + self.target_size[0]) - if len(ret.shape) <= 3: - ret = ret[self.offset_y: lower, self.offset_x: right] - else: - ret = ret[..., self.offset_y: lower, self.offset_x: right, :] - return ret - - def apply_coords(self, coords): - coords[:, 0] = coords[:, 0] * self.img_scale - coords[:, 1] = coords[:, 1] * self.img_scale - coords[:, 0] -= self.offset_x - coords[:, 1] -= self.offset_y - return coords - - def apply_segmentation(self, segmentation): - segmentation = self.apply_image(segmentation, interp=Image.NEAREST) - return segmentation - - def inverse(self): - raise NotImplementedError - # return ResizeTransform(self.new_h, self.new_w, self.h, self.w, self.interp) \ No newline at end of file diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/tools/deploy/torchscript_mask_rcnn.cpp b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/tools/deploy/torchscript_mask_rcnn.cpp deleted file mode 100644 index b40f13b81f601788847992e6627b448d62a287e2..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/tools/deploy/torchscript_mask_rcnn.cpp +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. 
-// @lint-ignore-every CLANGTIDY -// This is an example code that demonstrates how to run inference -// with a torchscript format Mask R-CNN model exported by ./export_model.py -// using export method=tracing, caffe2_tracing & scripting. - -#include -#include -#include - -#include -#include -#include -#include - -// only needed for export_method=tracing -#include // @oss-only -// @fb-only: #include - -using namespace std; - -c10::IValue get_caffe2_tracing_inputs(cv::Mat& img, c10::Device device) { - const int height = img.rows; - const int width = img.cols; - // FPN models require divisibility of 32. - // Tracing mode does padding inside the graph, but caffe2_tracing does not. - assert(height % 32 == 0 && width % 32 == 0); - const int channels = 3; - - auto input = - torch::from_blob(img.data, {1, height, width, channels}, torch::kUInt8); - // NHWC to NCHW - input = input.to(device, torch::kFloat).permute({0, 3, 1, 2}).contiguous(); - - std::array im_info_data{height * 1.0f, width * 1.0f, 1.0f}; - auto im_info = - torch::from_blob(im_info_data.data(), {1, 3}).clone().to(device); - return std::make_tuple(input, im_info); -} - -c10::IValue get_tracing_inputs(cv::Mat& img, c10::Device device) { - const int height = img.rows; - const int width = img.cols; - const int channels = 3; - - auto input = - torch::from_blob(img.data, {height, width, channels}, torch::kUInt8); - // HWC to CHW - input = input.to(device, torch::kFloat).permute({2, 0, 1}).contiguous(); - return input; -} - -// create a Tuple[Dict[str, Tensor]] which is the input type of scripted model -c10::IValue get_scripting_inputs(cv::Mat& img, c10::Device device) { - const int height = img.rows; - const int width = img.cols; - const int channels = 3; - - auto img_tensor = - torch::from_blob(img.data, {height, width, channels}, torch::kUInt8); - // HWC to CHW - img_tensor = - img_tensor.to(device, torch::kFloat).permute({2, 0, 1}).contiguous(); - auto dic = c10::Dict(); - dic.insert("image", img_tensor); - return std::make_tuple(dic); -} - -c10::IValue -get_inputs(std::string export_method, cv::Mat& img, c10::Device device) { - // Given an image, create inputs in the format required by the model. - if (export_method == "tracing") - return get_tracing_inputs(img, device); - if (export_method == "caffe2_tracing") - return get_caffe2_tracing_inputs(img, device); - if (export_method == "scripting") - return get_scripting_inputs(img, device); - abort(); -} - -struct MaskRCNNOutputs { - at::Tensor pred_boxes, pred_classes, pred_masks, scores; - int num_instances() const { - return pred_boxes.sizes()[0]; - } -}; - -MaskRCNNOutputs get_outputs(std::string export_method, c10::IValue outputs) { - // Given outputs of the model, extract tensors from it to turn into a - // common MaskRCNNOutputs format. - if (export_method == "tracing") { - auto out_tuple = outputs.toTuple()->elements(); - // They are ordered alphabetically by their field name in Instances - return MaskRCNNOutputs{ - out_tuple[0].toTensor(), - out_tuple[1].toTensor(), - out_tuple[2].toTensor(), - out_tuple[3].toTensor()}; - } - if (export_method == "caffe2_tracing") { - auto out_tuple = outputs.toTuple()->elements(); - // A legacy order used by caffe2 models - return MaskRCNNOutputs{ - out_tuple[0].toTensor(), - out_tuple[2].toTensor(), - out_tuple[3].toTensor(), - out_tuple[1].toTensor()}; - } - if (export_method == "scripting") { - // With the ScriptableAdapter defined in export_model.py, the output is - // List[Dict[str, Any]]. 
- auto out_dict = outputs.toList().get(0).toGenericDict(); - return MaskRCNNOutputs{ - out_dict.at("pred_boxes").toTensor(), - out_dict.at("pred_classes").toTensor(), - out_dict.at("pred_masks").toTensor(), - out_dict.at("scores").toTensor()}; - } - abort(); -} - -int main(int argc, const char* argv[]) { - if (argc != 4) { - cerr << R"xx( -Usage: - ./torchscript_mask_rcnn model.ts input.jpg EXPORT_METHOD - - EXPORT_METHOD can be "tracing", "caffe2_tracing" or "scripting". -)xx"; - return 1; - } - std::string image_file = argv[2]; - std::string export_method = argv[3]; - assert( - export_method == "caffe2_tracing" || export_method == "tracing" || - export_method == "scripting"); - - torch::jit::getBailoutDepth() = 1; - torch::autograd::AutoGradMode guard(false); - auto module = torch::jit::load(argv[1]); - - assert(module.buffers().size() > 0); - // Assume that the entire model is on the same device. - // We just put input to this device. - auto device = (*begin(module.buffers())).device(); - - cv::Mat input_img = cv::imread(image_file, cv::IMREAD_COLOR); - auto inputs = get_inputs(export_method, input_img, device); - - // Run the network - auto output = module.forward({inputs}); - if (device.is_cuda()) - c10::cuda::getCurrentCUDAStream().synchronize(); - - // run 3 more times to benchmark - int N_benchmark = 3, N_warmup = 1; - auto start_time = chrono::high_resolution_clock::now(); - for (int i = 0; i < N_benchmark + N_warmup; ++i) { - if (i == N_warmup) - start_time = chrono::high_resolution_clock::now(); - output = module.forward({inputs}); - if (device.is_cuda()) - c10::cuda::getCurrentCUDAStream().synchronize(); - } - auto end_time = chrono::high_resolution_clock::now(); - auto ms = chrono::duration_cast(end_time - start_time) - .count(); - cout << "Latency (should vary with different inputs): " - << ms * 1.0 / 1e6 / N_benchmark << " seconds" << endl; - - // Parse Mask R-CNN outputs - auto rcnn_outputs = get_outputs(export_method, output); - cout << "Number of detected objects: " << rcnn_outputs.num_instances() - << endl; - - cout << "pred_boxes: " << rcnn_outputs.pred_boxes.toString() << " " - << rcnn_outputs.pred_boxes.sizes() << endl; - cout << "scores: " << rcnn_outputs.scores.toString() << " " - << rcnn_outputs.scores.sizes() << endl; - cout << "pred_classes: " << rcnn_outputs.pred_classes.toString() << " " - << rcnn_outputs.pred_classes.sizes() << endl; - cout << "pred_masks: " << rcnn_outputs.pred_masks.toString() << " " - << rcnn_outputs.pred_masks.sizes() << endl; - - cout << rcnn_outputs.pred_boxes << endl; - return 0; -} diff --git a/spaces/yuhanbo/chat-gpt/app/api/chat-stream/route.ts b/spaces/yuhanbo/chat-gpt/app/api/chat-stream/route.ts deleted file mode 100644 index e7fe97fea79aae5258df2b6e9fae54f21a5ae2c0..0000000000000000000000000000000000000000 --- a/spaces/yuhanbo/chat-gpt/app/api/chat-stream/route.ts +++ /dev/null @@ -1,66 +0,0 @@ -import { createParser } from "eventsource-parser"; -import { NextRequest } from "next/server"; - -async function createStream(req: NextRequest) { - const encoder = new TextEncoder(); - const decoder = new TextDecoder(); - - let apiKey = process.env.OPENAI_API_KEY; - - const userApiKey = req.headers.get("token"); - if (userApiKey) { - apiKey = userApiKey; - console.log("[Stream] using user api key:" + apiKey); - } - - const res = await fetch("https://api.openai.com/v1/chat/completions", { - headers: { - "Content-Type": "application/json", - Authorization: `Bearer ${apiKey}`, - }, - method: "POST", - body: req.body, - }); - - const stream = 
new ReadableStream({ - async start(controller) { - function onParse(event: any) { - if (event.type === "event") { - const data = event.data; - // https://beta.openai.com/docs/api-reference/completions/create#completions/create-stream - if (data === "[DONE]") { - controller.close(); - return; - } - try { - const json = JSON.parse(data); - const text = json.choices[0].delta.content; - const queue = encoder.encode(text); - controller.enqueue(queue); - } catch (e) { - controller.error(e); - } - } - } - - const parser = createParser(onParse); - for await (const chunk of res.body as any) { - parser.feed(decoder.decode(chunk)); - } - }, - }); - return stream; -} - -export async function POST(req: NextRequest) { - try { - const stream = await createStream(req); - return new Response(stream); - } catch (error) { - console.error("[Chat Stream]", error); - } -} - -export const config = { - runtime: "edge", -}; diff --git a/spaces/yukiiiwasneverhere/yuki/README.md b/spaces/yukiiiwasneverhere/yuki/README.md deleted file mode 100644 index 48ca0e1c404f21bab04da9f0d1a9bcb6f4c98dfd..0000000000000000000000000000000000000000 --- a/spaces/yukiiiwasneverhere/yuki/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Yuki -emoji: 📊 -colorFrom: pink -colorTo: indigo -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/zdxiaoda/sovits-4.0-V1-anime-character-model/so-vits-svc/modules/enhancer.py b/spaces/zdxiaoda/sovits-4.0-V1-anime-character-model/so-vits-svc/modules/enhancer.py deleted file mode 100644 index 37676311f7d8dc4ddc2a5244dedc27b2437e04f5..0000000000000000000000000000000000000000 --- a/spaces/zdxiaoda/sovits-4.0-V1-anime-character-model/so-vits-svc/modules/enhancer.py +++ /dev/null @@ -1,105 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from vdecoder.nsf_hifigan.nvSTFT import STFT -from vdecoder.nsf_hifigan.models import load_model -from torchaudio.transforms import Resample - -class Enhancer: - def __init__(self, enhancer_type, enhancer_ckpt, device=None): - if device is None: - device = 'cuda' if torch.cuda.is_available() else 'cpu' - self.device = device - - if enhancer_type == 'nsf-hifigan': - self.enhancer = NsfHifiGAN(enhancer_ckpt, device=self.device) - else: - raise ValueError(f" [x] Unknown enhancer: {enhancer_type}") - - self.resample_kernel = {} - self.enhancer_sample_rate = self.enhancer.sample_rate() - self.enhancer_hop_size = self.enhancer.hop_size() - - def enhance(self, - audio, # 1, T - sample_rate, - f0, # 1, n_frames, 1 - hop_size, - adaptive_key = 0, - silence_front = 0 - ): - # enhancer start time - start_frame = int(silence_front * sample_rate / hop_size) - real_silence_front = start_frame * hop_size / sample_rate - audio = audio[:, int(np.round(real_silence_front * sample_rate)) : ] - f0 = f0[: , start_frame :, :] - - # adaptive parameters - adaptive_factor = 2 ** ( -adaptive_key / 12) - adaptive_sample_rate = 100 * int(np.round(self.enhancer_sample_rate / adaptive_factor / 100)) - real_factor = self.enhancer_sample_rate / adaptive_sample_rate - - # resample the ddsp output - if sample_rate == adaptive_sample_rate: - audio_res = audio - else: - key_str = str(sample_rate) + str(adaptive_sample_rate) - if key_str not in self.resample_kernel: - self.resample_kernel[key_str] = Resample(sample_rate, adaptive_sample_rate, lowpass_filter_width = 128).to(self.device) - audio_res = self.resample_kernel[key_str](audio) - - n_frames = int(audio_res.size(-1) // 
self.enhancer_hop_size + 1) - - # resample f0 - f0_np = f0.squeeze(0).squeeze(-1).cpu().numpy() - f0_np *= real_factor - time_org = (hop_size / sample_rate) * np.arange(len(f0_np)) / real_factor - time_frame = (self.enhancer_hop_size / self.enhancer_sample_rate) * np.arange(n_frames) - f0_res = np.interp(time_frame, time_org, f0_np, left=f0_np[0], right=f0_np[-1]) - f0_res = torch.from_numpy(f0_res).unsqueeze(0).float().to(self.device) # 1, n_frames - - # enhance - enhanced_audio, enhancer_sample_rate = self.enhancer(audio_res, f0_res) - - # resample the enhanced output - if adaptive_factor != 0: - key_str = str(adaptive_sample_rate) + str(enhancer_sample_rate) - if key_str not in self.resample_kernel: - self.resample_kernel[key_str] = Resample(adaptive_sample_rate, enhancer_sample_rate, lowpass_filter_width = 128).to(self.device) - enhanced_audio = self.resample_kernel[key_str](enhanced_audio) - - # pad the silence frames - if start_frame > 0: - enhanced_audio = F.pad(enhanced_audio, (int(np.round(enhancer_sample_rate * real_silence_front)), 0)) - - return enhanced_audio, enhancer_sample_rate - - -class NsfHifiGAN(torch.nn.Module): - def __init__(self, model_path, device=None): - super().__init__() - if device is None: - device = 'cuda' if torch.cuda.is_available() else 'cpu' - self.device = device - print('| Load HifiGAN: ', model_path) - self.model, self.h = load_model(model_path, device=self.device) - - def sample_rate(self): - return self.h.sampling_rate - - def hop_size(self): - return self.h.hop_size - - def forward(self, audio, f0): - stft = STFT( - self.h.sampling_rate, - self.h.num_mels, - self.h.n_fft, - self.h.win_size, - self.h.hop_size, - self.h.fmin, - self.h.fmax) - with torch.no_grad(): - mel = stft.get_mel(audio) - enhanced_audio = self.model(mel, f0[:,:mel.size(-1)]).view(-1) - return enhanced_audio, self.h.sampling_rate \ No newline at end of file diff --git a/spaces/zhang-wei-jian/docker/node_modules/ee-first/README.md b/spaces/zhang-wei-jian/docker/node_modules/ee-first/README.md deleted file mode 100644 index cbd2478beffb7e4e612f99e8bff383255c21f253..0000000000000000000000000000000000000000 --- a/spaces/zhang-wei-jian/docker/node_modules/ee-first/README.md +++ /dev/null @@ -1,80 +0,0 @@ -# EE First - -[![NPM version][npm-image]][npm-url] -[![Build status][travis-image]][travis-url] -[![Test coverage][coveralls-image]][coveralls-url] -[![License][license-image]][license-url] -[![Downloads][downloads-image]][downloads-url] -[![Gittip][gittip-image]][gittip-url] - -Get the first event in a set of event emitters and event pairs, -then clean up after itself. - -## Install - -```sh -$ npm install ee-first -``` - -## API - -```js -var first = require('ee-first') -``` - -### first(arr, listener) - -Invoke `listener` on the first event from the list specified in `arr`. `arr` is -an array of arrays, with each array in the format `[ee, ...event]`. `listener` -will be called only once, the first time any of the given events are emitted. If -`error` is one of the listened events, then if that fires first, the `listener` -will be given the `err` argument. - -The `listener` is invoked as `listener(err, ee, event, args)`, where `err` is the -first argument emitted from an `error` event, if applicable; `ee` is the event -emitter that fired; `event` is the string event name that fired; and `args` is an -array of the arguments that were emitted on the event. 
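For instance, a common pattern (an illustrative sketch only; the `req`/`res` HTTP pair and the `onSocketDone` helper are assumptions for this example, not part of the original README) is to react to whichever termination event fires first on a request/response pair:

```js
var first = require('ee-first')

// Hypothetical Node.js HTTP handler: req and res are ordinary EventEmitters.
function onSocketDone (req, res, callback) {
  first([
    [req, 'aborted', 'end', 'error'],
    [res, 'finish', 'error']
  ], function (err, ee, event, args) {
    // err is set only when an 'error' event won the race;
    // event names which of the listened events actually fired.
    callback(err, event)
  })
}
```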
- -```js -var ee1 = new EventEmitter() -var ee2 = new EventEmitter() - -first([ - [ee1, 'close', 'end', 'error'], - [ee2, 'error'] -], function (err, ee, event, args) { - // listener invoked -}) -``` - -#### .cancel() - -The group of listeners can be cancelled before being invoked and have all the event -listeners removed from the underlying event emitters. - -```js -var thunk = first([ - [ee1, 'close', 'end', 'error'], - [ee2, 'error'] -], function (err, ee, event, args) { - // listener invoked -}) - -// cancel and clean up -thunk.cancel() -``` - -[npm-image]: https://img.shields.io/npm/v/ee-first.svg?style=flat-square -[npm-url]: https://npmjs.org/package/ee-first -[github-tag]: http://img.shields.io/github/tag/jonathanong/ee-first.svg?style=flat-square -[github-url]: https://github.com/jonathanong/ee-first/tags -[travis-image]: https://img.shields.io/travis/jonathanong/ee-first.svg?style=flat-square -[travis-url]: https://travis-ci.org/jonathanong/ee-first -[coveralls-image]: https://img.shields.io/coveralls/jonathanong/ee-first.svg?style=flat-square -[coveralls-url]: https://coveralls.io/r/jonathanong/ee-first?branch=master -[license-image]: http://img.shields.io/npm/l/ee-first.svg?style=flat-square -[license-url]: LICENSE.md -[downloads-image]: http://img.shields.io/npm/dm/ee-first.svg?style=flat-square -[downloads-url]: https://npmjs.org/package/ee-first -[gittip-image]: https://img.shields.io/gittip/jonathanong.svg?style=flat-square -[gittip-url]: https://www.gittip.com/jonathanong/ diff --git a/spaces/zhaoys/wfms-kuiwenc/src/components/chat-scroll-anchor.tsx b/spaces/zhaoys/wfms-kuiwenc/src/components/chat-scroll-anchor.tsx deleted file mode 100644 index ac809f4486a48e134cb69314c3d0dae5e68d614e..0000000000000000000000000000000000000000 --- a/spaces/zhaoys/wfms-kuiwenc/src/components/chat-scroll-anchor.tsx +++ /dev/null @@ -1,29 +0,0 @@ -'use client' - -import * as React from 'react' -import { useInView } from 'react-intersection-observer' - -import { useAtBottom } from '@/lib/hooks/use-at-bottom' - -interface ChatScrollAnchorProps { - trackVisibility?: boolean -} - -export function ChatScrollAnchor({ trackVisibility }: ChatScrollAnchorProps) { - const isAtBottom = useAtBottom() - const { ref, entry, inView } = useInView({ - trackVisibility, - delay: 100, - rootMargin: '0px 0px -150px 0px' - }) - - React.useEffect(() => { - if (isAtBottom && trackVisibility && !inView) { - entry?.target.scrollIntoView({ - block: 'start' - }) - } - }, [inView, entry, isAtBottom, trackVisibility]) - - return
                    -} diff --git a/spaces/zideliu/styledrop/timm/optim/novograd.py b/spaces/zideliu/styledrop/timm/optim/novograd.py deleted file mode 100644 index 4137c6aa9406360d29f5f7234ebbdef294404d0e..0000000000000000000000000000000000000000 --- a/spaces/zideliu/styledrop/timm/optim/novograd.py +++ /dev/null @@ -1,77 +0,0 @@ -"""NovoGrad Optimizer. -Original impl by Masashi Kimura (Convergence Lab): https://github.com/convergence-lab/novograd -Paper: `Stochastic Gradient Methods with Layer-wise Adaptive Moments for Training of Deep Networks` - - https://arxiv.org/abs/1905.11286 -""" - -import torch -from torch.optim.optimizer import Optimizer -import math - - -class NovoGrad(Optimizer): - def __init__(self, params, grad_averaging=False, lr=0.1, betas=(0.95, 0.98), eps=1e-8, weight_decay=0): - defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) - super(NovoGrad, self).__init__(params, defaults) - self._lr = lr - self._beta1 = betas[0] - self._beta2 = betas[1] - self._eps = eps - self._wd = weight_decay - self._grad_averaging = grad_averaging - - self._momentum_initialized = False - - def step(self, closure=None): - loss = None - if closure is not None: - loss = closure() - - if not self._momentum_initialized: - for group in self.param_groups: - for p in group['params']: - if p.grad is None: - continue - state = self.state[p] - grad = p.grad.data - if grad.is_sparse: - raise RuntimeError('NovoGrad does not support sparse gradients') - - v = torch.norm(grad)**2 - m = grad/(torch.sqrt(v) + self._eps) + self._wd * p.data - state['step'] = 0 - state['v'] = v - state['m'] = m - state['grad_ema'] = None - self._momentum_initialized = True - - for group in self.param_groups: - for p in group['params']: - if p.grad is None: - continue - state = self.state[p] - state['step'] += 1 - - step, v, m = state['step'], state['v'], state['m'] - grad_ema = state['grad_ema'] - - grad = p.grad.data - g2 = torch.norm(grad)**2 - grad_ema = g2 if grad_ema is None else grad_ema * \ - self._beta2 + g2 * (1. - self._beta2) - grad *= 1.0 / (torch.sqrt(grad_ema) + self._eps) - - if self._grad_averaging: - grad *= (1. - self._beta1) - - g2 = torch.norm(grad)**2 - v = self._beta2*v + (1. - self._beta2)*g2 - m = self._beta1*m + (grad / (torch.sqrt(v) + self._eps) + self._wd * p.data) - bias_correction1 = 1 - self._beta1 ** step - bias_correction2 = 1 - self._beta2 ** step - step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1 - - state['v'], state['m'] = v, m - state['grad_ema'] = grad_ema - p.data.add_(-step_size, m) - return loss diff --git a/spaces/zkunn/Alipay_Gradio_theme/app.py b/spaces/zkunn/Alipay_Gradio_theme/app.py deleted file mode 100644 index 1ac03a619673975dfa42b32295b1a71446842669..0000000000000000000000000000000000000000 --- a/spaces/zkunn/Alipay_Gradio_theme/app.py +++ /dev/null @@ -1,145 +0,0 @@ -import time - -import gradio as gr -from gradio.themes.utils.theme_dropdown import create_theme_dropdown - -dropdown, js = create_theme_dropdown() - -with gr.Blocks(theme='zkunn/Alipay_Gradio_theme') as demo: - with gr.Row().style(equal_height=True): - with gr.Column(scale=10): - gr.Markdown( - """ - # Theme preview: `Alipay_Gradio_theme` - To use this theme, set `theme='zkunn/Alipay_Gradio_theme'` in `gr.Blocks()` or `gr.Interface()`. - You can append an `@` and a semantic version expression, e.g. @>=1.0.0,<2.0.0 to pin to a given version - of this theme. 
- """ - ) - with gr.Column(scale=3): - with gr.Box(): - dropdown.render() - toggle_dark = gr.Button(value="Toggle Dark").style(full_width=True) - - dropdown.change(None, dropdown, None, _js=js) - toggle_dark.click( - None, - _js=""" - () => { - document.body.classList.toggle('dark'); - } - """, - ) - - name = gr.Textbox( - label="Name", - info="Full name, including middle name. No special characters.", - placeholder="John Doe", - value="John Doe", - interactive=True, - ) - - with gr.Row(): - slider1 = gr.Slider(label="Slider 1") - slider2 = gr.Slider(label="Slider 2") - gr.CheckboxGroup(["A", "B", "C"], label="Checkbox Group") - - with gr.Row(): - with gr.Column(variant="panel", scale=1): - gr.Markdown("## Panel 1") - radio = gr.Radio( - ["A", "B", "C"], - label="Radio", - info="Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.", - ) - drop = gr.Dropdown(["Option 1", "Option 2", "Option 3"], show_label=False) - drop_2 = gr.Dropdown( - ["Option A", "Option B", "Option C"], - multiselect=True, - value=["Option A"], - label="Dropdown", - interactive=True, - ) - check = gr.Checkbox(label="Go") - with gr.Column(variant="panel", scale=2): - img = gr.Image( - "https://gradio.app/assets/img/header-image.jpg", label="Image" - ).style(height=320) - with gr.Row(): - go_btn = gr.Button("Go", label="Primary Button", variant="primary") - clear_btn = gr.Button( - "Clear", label="Secondary Button", variant="secondary" - ) - - def go(*args): - time.sleep(3) - return "https://gradio.app/assets/img/header-image.jpg" - - go_btn.click(go, [radio, drop, drop_2, check, name], img, api_name="go") - - def clear(): - time.sleep(0.2) - return None - - clear_btn.click(clear, None, img) - - with gr.Row(): - btn1 = gr.Button("Button 1").style(size="sm") - btn2 = gr.UploadButton().style(size="sm") - stop_btn = gr.Button("Stop", label="Stop Button", variant="stop").style( - size="sm" - ) - - with gr.Row(): - gr.Dataframe(value=[[1, 2, 3], [4, 5, 6], [7, 8, 9]], label="Dataframe") - gr.JSON( - value={"a": 1, "b": 2, "c": {"test": "a", "test2": [1, 2, 3]}}, label="JSON" - ) - gr.Label(value={"cat": 0.7, "dog": 0.2, "fish": 0.1}) - gr.File() - with gr.Row(): - gr.ColorPicker() - gr.Video("https://gradio-static-files.s3.us-west-2.amazonaws.com/world.mp4") - gr.Gallery( - [ - ( - "https://gradio-static-files.s3.us-west-2.amazonaws.com/lion.jpg", - "lion", - ), - ( - "https://gradio-static-files.s3.us-west-2.amazonaws.com/logo.png", - "logo", - ), - ( - "https://gradio-static-files.s3.us-west-2.amazonaws.com/tower.jpg", - "tower", - ), - ] - ).style(height="200px", grid=2) - - with gr.Row(): - with gr.Column(scale=2): - chatbot = gr.Chatbot([("Hello", "Hi")], label="Chatbot") - chat_btn = gr.Button("Add messages") - - def chat(history): - time.sleep(2) - yield [["How are you?", "I am good."]] - - chat_btn.click( - lambda history: history - + [["How are you?", "I am good."]] - + (time.sleep(2) or []), - chatbot, - chatbot, - ) - with gr.Column(scale=1): - with gr.Accordion("Advanced Settings"): - gr.Markdown("Hello") - gr.Number(label="Chatbot control 1") - gr.Number(label="Chatbot control 2") - gr.Number(label="Chatbot control 3") - - -if __name__ == "__main__": - demo.queue().launch() diff --git a/spaces/zomehwh/vits-uma-genshin-honkai/app.py b/spaces/zomehwh/vits-uma-genshin-honkai/app.py deleted file mode 100644 index 
ba29f6a5aff153461017c2e11e03a8765581c0d5..0000000000000000000000000000000000000000 --- a/spaces/zomehwh/vits-uma-genshin-honkai/app.py +++ /dev/null @@ -1,150 +0,0 @@ -# coding=utf-8 -import time -import os -import gradio as gr -import utils -import argparse -import commons -from models import SynthesizerTrn -from text import text_to_sequence -import torch -from torch import no_grad, LongTensor -import webbrowser -import logging -import gradio.processing_utils as gr_processing_utils -logging.getLogger('numba').setLevel(logging.WARNING) -limitation = os.getenv("SYSTEM") == "spaces" # limit text and audio length in huggingface spaces - -audio_postprocess_ori = gr.Audio.postprocess -def audio_postprocess(self, y): - data = audio_postprocess_ori(self, y) - if data is None: - return None - return gr_processing_utils.encode_url_or_file_to_base64(data["name"]) -gr.Audio.postprocess = audio_postprocess - -def get_text(text, hps): - text_norm, clean_text = text_to_sequence(text, hps.symbols, hps.data.text_cleaners) - if hps.data.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = LongTensor(text_norm) - return text_norm, clean_text - -def vits(text, language, speaker_id, noise_scale, noise_scale_w, length_scale): - start = time.perf_counter() - if not len(text): - return "输入文本不能为空!", None, None - text = text.replace('\n', ' ').replace('\r', '').replace(" ", "") - if len(text) > 100 and limitation: - return f"输入文字过长!{len(text)}>100", None, None - if language == 0: - text = f"[ZH]{text}[ZH]" - elif language == 1: - text = f"[JA]{text}[JA]" - else: - text = f"{text}" - stn_tst, clean_text = get_text(text, hps_ms) - with no_grad(): - x_tst = stn_tst.unsqueeze(0).to(device) - x_tst_lengths = LongTensor([stn_tst.size(0)]).to(device) - speaker_id = LongTensor([speaker_id]).to(device) - audio = net_g_ms.infer(x_tst, x_tst_lengths, sid=speaker_id, noise_scale=noise_scale, noise_scale_w=noise_scale_w, - length_scale=length_scale)[0][0, 0].data.cpu().float().numpy() - - return "生成成功!", (22050, audio), f"生成耗时 {round(time.perf_counter()-start, 2)} s" - -def search_speaker(search_value): - for s in speakers: - if search_value == s: - return s - for s in speakers: - if search_value in s: - return s - -def change_lang(language): - if language == 0: - return 0.6, 0.668, 1.2 - else: - return 0.6, 0.668, 1.1 - -download_audio_js = """ -() =>{{ - let root = document.querySelector("body > gradio-app"); - if (root.shadowRoot != null) - root = root.shadowRoot; - let audio = root.querySelector("#tts-audio").querySelector("audio"); - let text = root.querySelector("#input-text").querySelector("textarea"); - if (audio == undefined) - return; - text = text.value; - if (text == undefined) - text = Math.floor(Math.random()*100000000); - audio = audio.src; - let oA = document.createElement("a"); - oA.download = text.substr(0, 20)+'.wav'; - oA.href = audio; - document.body.appendChild(oA); - oA.click(); - oA.remove(); -}} -""" - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--device', type=str, default='cpu') - parser.add_argument('--api', action="store_true", default=False) - parser.add_argument("--share", action="store_true", default=False, help="share gradio app") - parser.add_argument("--colab", action="store_true", default=False, help="share gradio app") - args = parser.parse_args() - device = torch.device(args.device) - - hps_ms = utils.get_hparams_from_file(r'./model/config.json') - net_g_ms = SynthesizerTrn( - len(hps_ms.symbols), - hps_ms.data.filter_length 
// 2 + 1, - hps_ms.train.segment_size // hps_ms.data.hop_length, - n_speakers=hps_ms.data.n_speakers, - **hps_ms.model) - _ = net_g_ms.eval().to(device) - speakers = hps_ms.speakers - model, optimizer, learning_rate, epochs = utils.load_checkpoint(r'./model/G_953000.pth', net_g_ms, None) - - with gr.Blocks() as app: - gr.Markdown( - "#
VITS online voice synthesis demo\n" - "#
Use of these models in any commercial project is strictly forbidden; you bear all consequences otherwise\n" - "
Voices mainly cover Uma Musume: Pretty Derby, Genshin Impact (Chinese), Genshin Impact (Japanese) and Honkai Impact 3rd
                    " - '' - '' - ) - - with gr.Tabs(): - with gr.TabItem("vits"): - with gr.Row(): - with gr.Column(): - input_text = gr.Textbox(label="Text (100 words limitation) " if limitation else "Text", lines=5, value="今天晚上吃啥好呢。", elem_id=f"input-text") - lang = gr.Dropdown(label="Language", choices=["中文", "日语", "中日混合(中文用[ZH][ZH]包裹起来,日文用[JA][JA]包裹起来)"], - type="index", value="中文") - btn = gr.Button(value="Submit") - with gr.Row(): - search = gr.Textbox(label="Search Speaker", lines=1) - btn2 = gr.Button(value="Search") - sid = gr.Dropdown(label="Speaker", choices=speakers, type="index", value=speakers[228]) - with gr.Row(): - ns = gr.Slider(label="noise_scale(控制感情变化程度)", minimum=0.1, maximum=1.0, step=0.1, value=0.6, interactive=True) - nsw = gr.Slider(label="noise_scale_w(控制音素发音长度)", minimum=0.1, maximum=1.0, step=0.1, value=0.668, interactive=True) - ls = gr.Slider(label="length_scale(控制整体语速)", minimum=0.1, maximum=2.0, step=0.1, value=1.2, interactive=True) - with gr.Column(): - o1 = gr.Textbox(label="Output Message") - o2 = gr.Audio(label="Output Audio", elem_id=f"tts-audio") - o3 = gr.Textbox(label="Extra Info") - download = gr.Button("Download Audio") - btn.click(vits, inputs=[input_text, lang, sid, ns, nsw, ls], outputs=[o1, o2, o3]) - download.click(None, [], [], _js=download_audio_js.format()) - btn2.click(search_speaker, inputs=[search], outputs=[sid]) - lang.change(change_lang, inputs=[lang], outputs=[ns, nsw, ls]) - with gr.TabItem("可用人物一览"): - gr.Radio(label="Speaker", choices=speakers, interactive=False, type="index") - if args.colab: - webbrowser.open("http://127.0.0.1:7860") - app.queue(concurrency_count=1, api_open=args.api).launch(share=args.share)